diff --git a/.github/workflows/lint-test.yaml b/.github/workflows/lint-test.yaml index b56e5f696a92..9885ab933060 100644 --- a/.github/workflows/lint-test.yaml +++ b/.github/workflows/lint-test.yaml @@ -21,7 +21,7 @@ jobs: python-version: 3.7 - name: Set up chart-testing - uses: helm/chart-testing-action@v2.3.1 + uses: helm/chart-testing-action@v3.10.1 - name: Run chart-testing (list-changed) id: list-changed diff --git a/.gitignore b/.gitignore index e58055faf9c7..c570b769ad5c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,5 @@ # Chart dependencies -/charts/*/charts +#/charts/*/charts .idea .vscode ks-prometheus/jsonnetfile.lock.json diff --git a/charts/prometheus-node-exporter/Chart.yaml b/charts/prometheus-node-exporter/Chart.yaml index 3c5c932fe2b8..adc5ad8ea40d 100644 --- a/charts/prometheus-node-exporter/Chart.yaml +++ b/charts/prometheus-node-exporter/Chart.yaml @@ -23,3 +23,12 @@ annotations: "artifacthub.io/links": | - name: Chart Source url: https://github.com/prometheus-community/helm-charts +dependencies: +- condition: ProcessExporter.enabled + name: ProcessExporter + repository: file://prometheus-process-exporter + version: 0.5.2 +- condition: CalicoExporter.enabled + name: CalicoExporter + repository: file://calico-exporter + version: 0.2.0 diff --git a/charts/prometheus-node-exporter/charts/calico-exporter/.helmignore b/charts/prometheus-node-exporter/charts/calico-exporter/.helmignore new file mode 100644 index 000000000000..691fa13d6a54 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/calico-exporter/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ \ No newline at end of file diff --git a/charts/prometheus-node-exporter/charts/calico-exporter/Chart.yaml b/charts/prometheus-node-exporter/charts/calico-exporter/Chart.yaml new file mode 100644 index 000000000000..3e3cd8c4434f --- /dev/null +++ b/charts/prometheus-node-exporter/charts/calico-exporter/Chart.yaml @@ -0,0 +1,32 @@ +apiVersion: v2 +name: CalicoExporter +description: A Helm chart for Calico Exporter on Kubernetes +# Specify the Kubernetes version range that we support. +# We allow pre-release versions for cloud-specific Kubernetes versions such as v1.21.5-gke.1302 or v1.18.9-eks-d1db3c +kubeVersion: ">=v1.19.0-0" +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.2.0 +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "0.2.0" +keywords: + - calico + - prometheus + - exporter +maintainers: + - email: wangyifei@kubesphere.io + name: wangyifei + diff --git a/charts/prometheus-node-exporter/charts/calico-exporter/README.md b/charts/prometheus-node-exporter/charts/calico-exporter/README.md new file mode 100644 index 000000000000..9d5d1d755e9e --- /dev/null +++ b/charts/prometheus-node-exporter/charts/calico-exporter/README.md @@ -0,0 +1 @@ +# calico-exporter diff --git a/charts/prometheus-node-exporter/charts/calico-exporter/rules/calico.yaml b/charts/prometheus-node-exporter/charts/calico-exporter/rules/calico.yaml new file mode 100644 index 000000000000..028be1a5c1dd --- /dev/null +++ b/charts/prometheus-node-exporter/charts/calico-exporter/rules/calico.yaml @@ -0,0 +1,57 @@ +apiVersion: alerting.kubesphere.io/v2beta1 +kind: GlobalRuleGroup +metadata: + annotations: + alerting.kubesphere.io/initial-configuration: | + {"apiVersion":"alerting.kubesphere.io/v2beta1","kind":"GlobalRuleGroup","metadata":{"annotations":{},"labels":{"alerting.kubesphere.io/builtin":"true","alerting.kubesphere.io/enable":"true"},"name":"calico-bgp"},"spec":{"rules":[{"alert":"CalicoBGPRouterDown","annotations":{"description":"Calico BGP router {{ $labels.instance }} ({{ $labels.router_id }}) is down.","runbook_url":"https://alert-runbooks.kubesphere.io/runbooks/calico/calicobgprouterdown","summary":"Calico BGP router is down."},"expr":"calico_bgp_router_info{up=\"false\"} == 1\n","for":"5m","labels":{"rule_id":"6a9f0b8a4c7e4c9d8f3b5a6c0d8f9e7b"},"severity":"critical"},{"alert":"CalicoBGPPeerStateAbnormal","annotations":{"description":"Calico BGP peer {{ $labels.name }} ({{ $labels.ip }}) is not in established state.","runbook_url":"https://alert-runbooks.kubesphere.io/runbooks/calico/calicobgppeerstateabnormal","summary":"Calico BGP peer state is abnormal."},"expr":"calico_bgp_peer_info{bgp_state!=\"Established\"} == 
1\n","for":"5m","labels":{"rule_id":"8f7c4f0a9d6a4b0c9f3a7b8c6d7f8e9c"},"severity":"warning"},{"alert":"CalicoBGPPeerBirdStateDown","annotations":{"description":"Calico BGP peer {{ $labels.name }} ({{ $labels.ip }}) has bird state down.","runbook_url":"https://alert-runbooks.kubesphere.io/runbooks/calico/calicobgppeerbirdstatedown","summary":"Calico BGP peer bird state is down."},"expr":"calico_bgp_peer_info{bird_state!=\"up\"} == 1\n","for":"5m","labels":{"rule_id":"9d6b4e1a8c7b4c0d8f3b7a6c0d8f9e7d"},"severity":"critical"},{"alert":"CalicoIPPoolUsageHigh","annotations":{"description":"Calico IP pool {{ $labels.name }} ({{ $labels.cidr }}) has {{ $value | humanizePercentage }} of IPs allocated.","runbook_url":"https://alert-runbooks.kubesphere.io/runbooks/calico/calicopoolusagehigh","summary":"Calico IP pool usage is high."},"expr":"calico_ippool_allocated_ips / calico_ippool_capacity \u003e= 0.9\n","for":"5m","labels":{"rule_id":"a7b8c9d0e1f2g3h4i5j6k7l8m9n0o1p2"},"severity":"warning"}]}} + labels: + alerting.kubesphere.io/builtin: "true" + alerting.kubesphere.io/enable: "true" + name: calico-bgp + namespace: kubesphere-monitoring-system +spec: + rules: + - alert: CalicoBGPRouterDown + annotations: + description: 'Calico BGP router {{ $labels.instance }} ({{ $labels.router_id }}) is down.' + runbook_url: https://alert-runbooks.kubesphere.io/runbooks/calico/calicobgprouterdown + summary: Calico BGP router is down. + expr: | + calico_bgp_router_info{up="false"} == 1 + for: 5m + labels: + rule_id: 6a9f0b8a4c7e4c9d8f3b5a6c0d8f9e7b + severity: critical + - alert: CalicoBGPPeerStateAbnormal + annotations: + description: 'Calico BGP peer {{ $labels.name }} ({{ $labels.ip }}) is not in established state.' + runbook_url: https://alert-runbooks.kubesphere.io/runbooks/calico/calicobgppeerstateabnormal + summary: Calico BGP peer state is abnormal. 
+ expr: | + calico_bgp_peer_info{bgp_state!="Established"} == 1 + for: 5m + labels: + rule_id: 8f7c4f0a9d6a4b0c9f3a7b8c6d7f8e9c + severity: warning + - alert: CalicoBGPPeerBirdStateDown + annotations: + description: 'Calico BGP peer {{ $labels.name }} ({{ $labels.ip }}) has bird state down.' + runbook_url: https://alert-runbooks.kubesphere.io/runbooks/calico/calicobgppeerbirdstatedown + summary: Calico BGP peer bird state is down. + expr: | + calico_bgp_peer_info{bird_state!="up"} == 1 + for: 5m + labels: + rule_id: 9d6b4e1a8c7b4c0d8f3b7a6c0d8f9e7d + severity: critical + - alert: CalicoIPPoolUsageHigh + annotations: + description: 'Calico IP pool {{ $labels.name }} ({{ $labels.cidr }}) has {{ $value | humanizePercentage }} of IPs allocated.' + runbook_url: https://alert-runbooks.kubesphere.io/runbooks/calico/calicopoolusagehigh + summary: Calico IP pool usage is high. + expr: | + calico_ippool_allocated_ips / calico_ippool_capacity >= 0.9 + for: 5m + labels: + rule_id: a7b8c9d0e1f2g3h4i5j6k7l8m9n0o1p2 + severity: warning \ No newline at end of file diff --git a/charts/prometheus-node-exporter/charts/calico-exporter/templates/_capabilities.tpl b/charts/prometheus-node-exporter/charts/calico-exporter/templates/_capabilities.tpl new file mode 100644 index 000000000000..11510487ef61 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/calico-exporter/templates/_capabilities.tpl @@ -0,0 +1,150 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. 
+*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. 
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimum Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/prometheus-node-exporter/charts/calico-exporter/templates/_daemonset.tpl b/charts/prometheus-node-exporter/charts/calico-exporter/templates/_daemonset.tpl new file mode 100644 index 000000000000..cc8fdc4e090e --- /dev/null +++ b/charts/prometheus-node-exporter/charts/calico-exporter/templates/_daemonset.tpl @@ -0,0 +1,46 @@ +{{/* +# Containers for the calico-exporter daemonset. +*/}} +{{- define "calico-exporter.daemonset.containers" -}} +- args: + - --logtostderr + - --secure-listen-address=0.0.0.0:{{ .Values.CalicoExporter.service.targetPort }} + - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 + - --upstream=http://127.0.0.1:{{ .Values.CalicoExporter.service.innerPort }}/ + image: "{{ .Values.CalicoExporter.kubeRbacProxy.image }}:{{ .Values.CalicoExporter.kubeRbacProxy.tag }}" + name: kube-rbac-proxy-calico-exporter + ports: + - containerPort: {{ .Values.CalicoExporter.service.targetPort }} + name: https-metrics + resources: +{{ toYaml .Values.CalicoExporter.kubeRbacProxy.resources | indent 12 }} + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 +- name: calico-exporter + env: + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: "{{ .Values.CalicoExporter.image.repository }}:{{ .Values.CalicoExporter.image.tag }}" + imagePullPolicy: {{ 
.Values.CalicoExporter.image.pullPolicy }} + args: + - --web.listen-address=127.0.0.1:{{ .Values.CalicoExporter.service.innerPort }} + - --collector.enable-collectors=bgp + resources: +{{ toYaml .Values.CalicoExporter.resources | indent 12 }} + volumeMounts: + - name: var-run-calico + mountPath: /var/run/calico +{{- end }} + +{{/* +# Volumes for the calico-exporter daemonset. +*/}} +{{- define "calico-exporter.daemonset.volumes" -}} +- name: var-run-calico + hostPath: + path: /var/run/calico +{{- end }} diff --git a/charts/prometheus-node-exporter/charts/calico-exporter/templates/_helpers.tpl b/charts/prometheus-node-exporter/charts/calico-exporter/templates/_helpers.tpl new file mode 100644 index 000000000000..28c76680dbc8 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/calico-exporter/templates/_helpers.tpl @@ -0,0 +1,107 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "calico-exporter.name" -}} +{{- if hasKey .Values "CalicoExporter" -}} +{{- default .Chart.Name .Values.CalicoExporter.nameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "calico-exporter.fullname" -}} +{{- if hasKey .Values "CalicoExporter" -}} +{{- if .Values.CalicoExporter.fullnameOverride }} +{{- .Values.CalicoExporter.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.CalicoExporter.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- else }} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "calico-exporter.serviceAccountName" -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "calico-exporter.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "calico-exporter.labels" }} +helm.sh/chart: {{ include "calico-exporter.chart" . }} +{{- include "calico-exporter.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ printf "%s%s" "v" .Chart.AppVersion | quote}} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{- define "calico-exporter.bgp-collector.labels" }} +{{- include "calico-exporter.labels" . }} +app.kubernetes.io/component: bgp-collector +{{- end }} + +{{- define "calico-exporter.ippool-collector.labels" }} +{{- include "calico-exporter.labels" . 
}} +app.kubernetes.io/component: ippool-collector +{{- end }} + +{{- define "calico-exporter.kube-controllers.labels" }} +{{- include "calico-exporter.labels" . }} +app.kubernetes.io/component: calico-kube-controllers +k8s-app: calico-kube-controllers +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "calico-exporter.selectorLabels" }} +app.kubernetes.io/name: {{ include "calico-exporter.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{- define "calico-exporter.bgp-collector.selectorLabels" }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels }} +{{- end }} +{{- end }} + +{{- define "calico-exporter.ippool-collector.selectorLabels" }} +{{- include "calico-exporter.selectorLabels" . }} +app.kubernetes.io/component: ippool-collector +{{- end }} + +{{- define "calico-exporter.kube-controllers.selectorLabels" }} +k8s-app: calico-kube-controllers +{{- end }} diff --git a/charts/prometheus-node-exporter/charts/calico-exporter/templates/alertRuleGroups.yaml b/charts/prometheus-node-exporter/charts/calico-exporter/templates/alertRuleGroups.yaml new file mode 100644 index 000000000000..d1e424a45c74 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/calico-exporter/templates/alertRuleGroups.yaml @@ -0,0 +1,2 @@ +{{ .Files.Get "rules/calico.yaml" }} +--- \ No newline at end of file diff --git a/charts/prometheus-node-exporter/charts/calico-exporter/templates/clusterrole.yaml b/charts/prometheus-node-exporter/charts/calico-exporter/templates/clusterrole.yaml new file mode 100644 index 000000000000..c1f994f34497 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/calico-exporter/templates/clusterrole.yaml @@ -0,0 +1,69 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "calico-exporter.fullname" . }}-calicoctl + labels: + {{- include "calico-exporter.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: + - namespaces + - nodes + verbs: + - get + - list + - apiGroups: [""] + resources: + - pods + - serviceaccounts + verbs: + - get + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgppeers + - bgpconfigurations + - clusterinformations + - felixconfigurations + - globalnetworkpolicies + - globalnetworksets + - ippools + - ipreservations + - kubecontrollersconfigurations + - networkpolicies + - networksets + - hostendpoints + - ipamblocks + - blockaffinities + - ipamhandles + - ipamconfigs + verbs: + - get + - list + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - get + - list + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "calico-exporter.fullname" . }}-auth + labels: + {{- include "calico-exporter.labels" . | nindent 4 }} +rules: + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create \ No newline at end of file diff --git a/charts/prometheus-node-exporter/charts/calico-exporter/templates/clusterrolebinding.yaml b/charts/prometheus-node-exporter/charts/calico-exporter/templates/clusterrolebinding.yaml new file mode 100644 index 000000000000..112fe3f91c6e --- /dev/null +++ b/charts/prometheus-node-exporter/charts/calico-exporter/templates/clusterrolebinding.yaml @@ -0,0 +1,29 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "calico-exporter.fullname" . }}-calicoctl + labels: + {{- include "calico-exporter.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "calico-exporter.fullname" . }}-calicoctl +subjects: + - kind: ServiceAccount + name: {{ template "calico-exporter.serviceAccountName" . 
}} + namespace: '{{ .Release.Namespace }}' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "calico-exporter.fullname" . }}-auth + labels: + {{- include "calico-exporter.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "calico-exporter.fullname" . }}-auth +subjects: + - kind: ServiceAccount + name: {{ template "calico-exporter.serviceAccountName" . }} + namespace: '{{ .Release.Namespace }}' \ No newline at end of file diff --git a/charts/prometheus-node-exporter/charts/calico-exporter/templates/deployment.yaml b/charts/prometheus-node-exporter/charts/calico-exporter/templates/deployment.yaml new file mode 100644 index 000000000000..17e710e23503 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/calico-exporter/templates/deployment.yaml @@ -0,0 +1,70 @@ +#file: noinspection KubernetesUnknownValues +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "calico-exporter.fullname" . }}-ippool + labels: {{ include "calico-exporter.ippool-collector.labels" . | indent 4 }} +spec: + selector: + matchLabels: {{ include "calico-exporter.ippool-collector.selectorLabels" . | indent 8 }} + template: + metadata: + labels: {{ include "calico-exporter.ippool-collector.labels" . | indent 8 }} + spec: + serviceAccountName: {{ template "calico-exporter.serviceAccountName" . 
}} +{{- if .Values.securityContext }} + securityContext: +{{ toYaml .Values.securityContext | indent 8 }} +{{- end }} + containers: + - args: + - --logtostderr + - --secure-listen-address=0.0.0.0:{{ .Values.service.targetPort }} + - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 + - --upstream=http://127.0.0.1:{{ .Values.service.innerPort }}/ + image: "{{ .Values.kubeRbacProxy.image }}:{{ .Values.kubeRbacProxy.tag }}" + name: kube-rbac-proxy + ports: + - containerPort: {{ .Values.service.targetPort }} + name: https-metrics + resources: +{{ toYaml .Values.kubeRbacProxy.resources | indent 12 }} + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + - name: calico-exporter + env: + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - --web.listen-address=127.0.0.1:{{ .Values.service.innerPort }} + - --collector.enable-collectors=ippool + resources: +{{ toYaml .Values.resources | indent 12 }} + volumeMounts: + - name: var-run-calico + mountPath: /var/run/calico +{{- if .Values.hostNetwork }} + hostNetwork: {{ .Values.hostNetwork }} +{{- end }} +{{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} +{{- end }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} + {{- end }} + volumes: + - name: var-run-calico + hostPath: + path: /var/run/calico \ No newline at end of file diff --git a/charts/prometheus-node-exporter/charts/calico-exporter/templates/service.yaml b/charts/prometheus-node-exporter/charts/calico-exporter/templates/service.yaml new file mode 100644 index 000000000000..842eb15e1ab9 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/calico-exporter/templates/service.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "calico-exporter.fullname" . }}-bgp + labels: + {{- include "calico-exporter.bgp-collector.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - name: https-metrics + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + selector: + {{- include "calico-exporter.bgp-collector.selectorLabels" . | nindent 4 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "calico-exporter.fullname" . }}-ippool + labels: + {{- include "calico-exporter.ippool-collector.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - name: https-metrics + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + selector: + {{- include "calico-exporter.ippool-collector.selectorLabels" . | nindent 4 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "calico-exporter.fullname" . }}-ipam + labels: + {{- include "calico-exporter.kube-controllers.labels" . | nindent 4 }} + namespace: kube-system +spec: + type: {{ .Values.calicoService.type }} + ports: + - name: http-metrics + port: {{ .Values.calicoService.port }} + targetPort: {{ .Values.calicoService.targetPort }} + selector: + {{- include "calico-exporter.kube-controllers.selectorLabels" . 
| nindent 4 }} \ No newline at end of file diff --git a/charts/prometheus-node-exporter/charts/calico-exporter/templates/servicemonitor.yaml b/charts/prometheus-node-exporter/charts/calico-exporter/templates/servicemonitor.yaml new file mode 100644 index 000000000000..bb0676655c35 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/calico-exporter/templates/servicemonitor.yaml @@ -0,0 +1,104 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + {{- include "calico-exporter.bgp-collector.labels" . | nindent 4 }} + {{- if .Values.serviceMonitor.labels }} + {{- toYaml .Values.serviceMonitor.labels | nindent 4 }} + {{- end }} + name: {{ include "calico-exporter.fullname" . }}-bgp +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + scrapeTimeout: "{{ .Values.serviceMonitor.scrapeTimeout }}" + interval: "{{ .Values.serviceMonitor.interval }}" + honorLabels: true + port: https-metrics + path: /metrics + scheme: https + relabelings: + - action: replace + regex: (.*) + replacement: $1 + sourceLabels: + - __meta_kubernetes_pod_node_name + targetLabel: instance + tlsConfig: + insecureSkipVerify: true + jobLabel: "{{ .Release.Name }}-bgp" + selector: + matchLabels: + app.kubernetes.io/component: bgp-collector + {{- include "calico-exporter.selectorLabels" . | nindent 6 }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + {{- include "calico-exporter.ippool-collector.labels" . | nindent 4 }} + {{- if .Values.serviceMonitor.labels }} + {{- toYaml .Values.serviceMonitor.labels | nindent 4 }} + {{- end }} + name: {{ include "calico-exporter.fullname" . 
}}-ippool +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + scrapeTimeout: "{{ .Values.serviceMonitor.scrapeTimeout }}" + interval: "{{ .Values.serviceMonitor.interval }}" + honorLabels: true + port: https-metrics + path: /metrics + scheme: https + relabelings: + - action: replace + regex: (.*) + replacement: $1 + sourceLabels: + - __meta_kubernetes_pod_node_name + targetLabel: instance + tlsConfig: + insecureSkipVerify: true + jobLabel: "{{ .Release.Name }}-ippool" + selector: + matchLabels: + {{- include "calico-exporter.ippool-collector.selectorLabels" . | nindent 6 }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + {{- include "calico-exporter.kube-controllers.labels" . | nindent 4 }} + {{- if .Values.serviceMonitor.labels }} + {{- toYaml .Values.serviceMonitor.labels | nindent 4 }} + {{- end }} + name: {{ include "calico-exporter.fullname" . }}-ipam +spec: + namespaceSelector: + matchNames: + - kube-system + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + scrapeTimeout: "{{ .Values.serviceMonitor.scrapeTimeout }}" + interval: "{{ .Values.serviceMonitor.interval }}" + honorLabels: true + port: http-metrics + path: /metrics + scheme: http + metricRelabelings: + - sourceLabels: [__name__] + regex: ipam_(.+) + replacement: calico_ipam_$1 + targetLabel: __name__ + relabelings: + - action: replace + regex: (.*) + replacement: $1 + sourceLabels: + - __meta_kubernetes_pod_node_name + targetLabel: instance + tlsConfig: + insecureSkipVerify: true + jobLabel: "{{ .Release.Name }}-ipam" + selector: + matchLabels: + {{- include "calico-exporter.kube-controllers.selectorLabels" . 
| nindent 6 }} diff --git a/charts/prometheus-node-exporter/charts/calico-exporter/values.yaml b/charts/prometheus-node-exporter/charts/calico-exporter/values.yaml new file mode 100644 index 000000000000..30c8f933d364 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/calico-exporter/values.yaml @@ -0,0 +1,98 @@ +# Default values for calico-exporter. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +image: + repository: kubesphere/calico-exporter + tag: v0.2.0 + pullPolicy: IfNotPresent + +## Creates a Prometheus Operator ServiceMonitor +serviceMonitor: + scrapeTimeout: "30s" + enabled: true + interval: "30s" + labels: + app.kubernetes.io/vendor: kubesphere + +calicoService: + type: ClusterIP + port: 9094 + targetPort: 9094 + innerPort: 9094 + nodePort: + annotations: + prometheus.io/scrape: "true" + +service: + type: ClusterIP + port: 9094 + targetPort: 9094 + innerPort: 9093 + nodePort: + annotations: + prometheus.io/scrape: "true" + +# Labels to add to the pod +podLabels: {} + +# Annotations to add to the pod +podAnnotations: {} + +# Set to Kubernetes default updateStrategy by default. +updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: 1 + memory: 500Mi + requests: + cpu: 102m + memory: 180Mi + +serviceAccount: + # The name of the ServiceAccount to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: calico-exporter + imagePullSecrets: [] + # Specifies whether to automount API credentials for the ServiceAccount to the pods + automountServiceAccountToken: true + +securityContext: + runAsNonRoot: true + runAsUser: 65534 + +hostNetwork: false + +## Assign a group of affinity scheduling rules +## +affinity: {} +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchFields: +# - key: metadata.name +# operator: In +# values: +# - target-host-name + +## Assign a nodeSelector if operating a hybrid cluster +## +nodeSelector: {} +# beta.kubernetes.io/arch: amd64 +# beta.kubernetes.io/os: linux + +tolerations: + - effect: NoSchedule + operator: Exists + +kubeRbacProxy: + image: kubesphere/kube-rbac-proxy + tag: v0.11.0 + resources: {} \ No newline at end of file diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/Chart.yaml b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/Chart.yaml new file mode 100644 index 000000000000..f56832dfa62e --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +appVersion: "0.5.0" +description: A Helm chart for prometheus process-exporter +name: ProcessExporter +version: 0.5.2 +home: https://github.com/mumoshu/prometheus-process-exporter +sources: +- https://github.com/ncabatoff/process-exporter +keywords: +- process-exporter +- prometheus +- exporter +maintainers: +- email: ykuoka@gmail.com + name: mumoshu diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/NOTES.txt b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/NOTES.txt new file mode 100644 index 000000000000..27c64957bf3d --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. 
Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus-process-exporter.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status by running 'kubectl get svc -w {{ template "prometheus-process-exporter.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus-process-exporter.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus-process-exporter.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/_daemonset.tpl b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/_daemonset.tpl new file mode 100644 index 000000000000..043d4a9a7bde --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/_daemonset.tpl @@ -0,0 +1,66 @@ +{{/* +# Containers for the prometheus-process-exporter daemonset. 
+*/}} +{{- define "prometheus-process-exporter.daemonset.containers" -}} +- name: kube-rbac-proxy-process-exporter + args: + - --logtostderr + - --secure-listen-address=0.0.0.0:{{ .Values.ProcessExporter.service.targetPort }} + - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 + - --upstream=http://127.0.0.1:{{ .Values.ProcessExporter.service.innerPort }}/ + + image: "{{ .Values.ProcessExporter.kubeRbacProxy.image }}:{{ .Values.ProcessExporter.kubeRbacProxy.tag }}" + ports: + - containerPort: {{ .Values.ProcessExporter.service.targetPort }} + name: https-metrics + resources: +{{ toYaml .Values.ProcessExporter.kubeRbacProxy.resources | indent 12 }} + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 +- name: process-exporter + image: "{{ .Values.ProcessExporter.image.repository }}:{{ .Values.ProcessExporter.image.tag }}" + imagePullPolicy: {{ .Values.ProcessExporter.image.pullPolicy }} + args: + - --procfs=/host/proc + - --config.path=/var/process-exporter/config.yml + - --web.listen-address=0.0.0.0:{{ .Values.ProcessExporter.service.innerPort }} +{{- if .Values.ProcessExporter.extraArgs }} +{{ toYaml .Values.ProcessExporter.extraArgs | indent 12 }} +{{- end }} + resources: +{{ toYaml .Values.ProcessExporter.resources | indent 12 }} + volumeMounts: + - name: proc + mountPath: /host/proc + readOnly: true + - name: config + mountPath: /var/process-exporter + {{- if .Values.ProcessExporter.extraHostVolumeMounts }} + {{- range $_, $mount := .Values.ProcessExporter.extraHostVolumeMounts }} + - name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} + readOnly: {{ $mount.readOnly }} + {{- if $mount.mountPropagation }} + mountPropagation: {{ $mount.mountPropagation }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +# Volumes for 
the prometheus-process-exporter daemonset. +*/}} +{{- define "prometheus-process-exporter.daemonset.volumes" -}} +- name: config + configMap: + name: {{ template "prometheus-process-exporter.fullname" . }} +{{- if .Values.ProcessExporter.extraHostVolumeMounts }} +{{- range $_, $mount := .Values.ProcessExporter.extraHostVolumeMounts }} +- name: {{ $mount.name }} + hostPath: + path: {{ $mount.hostPath }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/_helpers.tpl b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/_helpers.tpl new file mode 100644 index 000000000000..e1bd922282f1 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/_helpers.tpl @@ -0,0 +1,115 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "prometheus-process-exporter.name" -}} +{{- if hasKey .Values "ProcessExporter" -}} +{{- default .Chart.Name .Values.ProcessExporter.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "prometheus-process-exporter.fullname" -}} +{{- if hasKey .Values "ProcessExporter" -}} +{{- if .Values.ProcessExporter.fullnameOverride -}} +{{- .Values.ProcessExporter.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.ProcessExporter.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- else -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* Generate basic labels */}} +{{- define "prometheus-process-exporter.labels" }} +app: {{ template "prometheus-process-exporter.name" . }} +heritage: {{.Release.Service }} +release: {{.Release.Name }} +chart: {{ template "prometheus-process-exporter.chart" . }} +{{- if .Values.podLabels}} +{{ toYaml .Values.podLabels }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "prometheus-process-exporter.selectorLabels" -}} +{{- if .Values.podLabels }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{ toYaml .Values.podLabels }} +{{- else }} +app: {{ template "prometheus-process-exporter.name" . }} +release: {{.Release.Name }} +{{- end }} +{{- end }} + +{{/* +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "prometheus-process-exporter.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{/* +Create the name of the service account to use +*/}} +{{- define "prometheus-process-exporter.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "prometheus-process-exporter.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Content of the process-exporter configmap containing all the config files +*/}} +{{- define "prometheus-process-exporter.config" -}} +{{- range $key, $value := .Values.files }} + {{ $key }}: |- +{{ $value | default "" | indent 4 }} +{{- end -}} +{{- range $key, $value := .Values.templates }} + {{ $key }}: |- +{{ $valueWithDefault := default "" $value -}} +{{ tpl $valueWithDefault $ | indent 4 }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "rbac.apiVersion" -}} +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/cluserrole.yaml b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/cluserrole.yaml new file mode 100644 index 000000000000..41b611262269 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/cluserrole.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: {{ include "prometheus-process-exporter.labels" . | indent 4 }} + name: {{ template "prometheus-process-exporter.fullname" . 
}} +rules: + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +{{- end }} diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/clusterrolebinding.yaml b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/clusterrolebinding.yaml new file mode 100644 index 000000000000..3b201091dd07 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/clusterrolebinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: {{ include "prometheus-process-exporter.labels" . | indent 4 }} + name: {{ template "prometheus-process-exporter.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus-process-exporter.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "prometheus-process-exporter.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/configmap.yaml b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/configmap.yaml new file mode 100644 index 000000000000..42538023fe93 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/configmap.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "prometheus-process-exporter.fullname" . }} + labels: + app: {{ template "prometheus-process-exporter.name" . }} + chart: {{ template "prometheus-process-exporter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: +{{ template "prometheus-process-exporter.config" . 
}} diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/endpoints.yaml b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/endpoints.yaml new file mode 100644 index 000000000000..62f9d3fab7de --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/endpoints.yaml @@ -0,0 +1,17 @@ +{{- if .Values.endpoints }} +apiVersion: v1 +kind: Endpoints +metadata: + name: {{ template "prometheus-process-exporter.fullname" . }} + labels: +{{ include "prometheus-process-exporter.labels" . | indent 4 }} +subsets: + - addresses: + {{- range .Values.endpoints }} + - ip: {{ . }} + {{- end }} + ports: + - name: metrics + port: 9100 + protocol: TCP +{{- end }} \ No newline at end of file diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/prometheusrules.yaml b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/prometheusrules.yaml new file mode 100644 index 000000000000..1acb20ab77a2 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/prometheusrules.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.serviceMonitor.enabled (.Capabilities.APIVersions.Has "alerting.kubesphere.io/v2beta1/ClusterRuleGroup") }} +apiVersion: alerting.kubesphere.io/v2beta1 +kind: ClusterRuleGroup +metadata: + labels: + alerting.kubesphere.io/enable: 'true' +{{ include "prometheus-process-exporter.labels" . | indent 4 }} + name: {{ template "prometheus-process-exporter.fullname" . }}-rules +spec: + rules: + - alert: NamedprocessGroupExistsZombieThreads + expr: sum by(cluster,node,groupname,instance,job) (namedprocess_namegroup_states{state="Zombie",job="{{ template "prometheus-process-exporter.fullname" . }}"}) > 0 + for: 10m + severity: error + annotations: + summary: The process group has zombie threads. 
+ message: {{`The {{ $labels.groupname }} process group on {{ $labels.node }} node has {{ $value }} zombie threads.`}} + - alert: ProcessExporterDown + expr: up{job="{{ template "prometheus-process-exporter.fullname" . }}"} == 0 + for: 10m + severity: critical + annotations: + summary: Target is unreachable. + message: {{`Process exporter target {{ $labels.instance }} is down.`}} + - alert: ProcessScrapeErrors + expr: increase(namedprocess_scrape_errors{job="{{ template "prometheus-process-exporter.fullname" . }}"}[5m]) > 0 + for: 15m + severity: error + annotations: + summary: Process exporter has failed to scrape metrics. + message: {{`Process exporter {{ $labels.instance }} has encountered {{ $value }} scrape errors in the last 5 minutes.`}} +{{- end }} diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/psp-clusterrole.yaml b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/psp-clusterrole.yaml new file mode 100644 index 000000000000..2c4650648ff6 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/psp-clusterrole.yaml @@ -0,0 +1,17 @@ +{{- if .Values.rbac.create }} +{{- if .Values.rbac.pspEnabled }} +{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} +kind: ClusterRole +apiVersion: {{ template "rbac.apiVersion" . }} +metadata: + labels: {{ include "prometheus-process-exporter.labels" . | indent 4 }} + name: psp-{{ template "prometheus-process-exporter.fullname" . }} +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "prometheus-process-exporter.fullname" . 
}} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/psp-clusterrolebinding.yaml b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/psp-clusterrolebinding.yaml new file mode 100644 index 000000000000..82151c768a61 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/psp-clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +{{- if .Values.rbac.pspEnabled }} +{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: {{ include "prometheus-process-exporter.labels" . | indent 4 }} + name: psp-{{ template "prometheus-process-exporter.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp-{{ template "prometheus-process-exporter.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus-process-exporter.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/psp.yaml b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/psp.yaml new file mode 100644 index 000000000000..2c8c3bb57d2c --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/psp.yaml @@ -0,0 +1,53 @@ +{{- if .Values.rbac.create }} +{{- if .Values.rbac.pspEnabled }} +{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + labels: {{ include "prometheus-process-exporter.labels" . | indent 4 }} + name: {{ template "prometheus-process-exporter.fullname" . }} +spec: + privileged: false + # Required to prevent escalations to root. 
+ # allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + #requiredDropCapabilities: + # - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + - 'hostPath' + hostNetwork: true + hostIPC: false + hostPID: true + hostPorts: + - min: 0 + max: 65535 + runAsUser: + # Permits the container to run with root privileges as well. + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 0 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/service.yaml b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/service.yaml new file mode 100644 index 000000000000..d704f2860df5 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "prometheus-process-exporter.fullname" . }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} + labels: {{ include "prometheus-process-exporter.labels" . 
| indent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + {{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: https-metrics + selector: + {{- include "prometheus-process-exporter.selectorLabels" . | nindent 4 }} diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/servicemonitor.yaml b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/servicemonitor.yaml new file mode 100644 index 000000000000..955a43acc902 --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/templates/servicemonitor.yaml @@ -0,0 +1,43 @@ +{{- if .Values.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "prometheus-process-exporter.fullname" . }} + labels: + chart: {{ template "prometheus-process-exporter.chart" . }} + app: {{ template "prometheus-process-exporter.name" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + {{- if .Values.serviceMonitor.labels }} + {{- toYaml .Values.serviceMonitor.labels | nindent 4 }} + {{- end }} +spec: + endpoints: + - interval: "{{ .Values.serviceMonitor.interval }}" + {{- if .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: "{{ .Values.serviceMonitor.scrapeTimeout }}" + {{- end }} + honorLabels: true + port: https-metrics + path: /metrics + scheme: https + tlsConfig: + insecureSkipVerify: true + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + relabelings: + - action: replace + regex: (.*) + replacement: $1 + sourceLabels: + - __meta_kubernetes_pod_node_name + targetLabel: node + jobLabel: "{{ .Release.Name }}" + selector: + matchLabels: + app: {{ template "prometheus-process-exporter.name" . 
}} + release: "{{ .Release.Name }}" + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/charts/prometheus-node-exporter/charts/prometheus-process-exporter/values.yaml b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/values.yaml new file mode 100644 index 000000000000..77002b3dda6b --- /dev/null +++ b/charts/prometheus-node-exporter/charts/prometheus-process-exporter/values.yaml @@ -0,0 +1,133 @@ +# Default values for prometheus-process-exporter. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +image: + repository: ncabatoff/process-exporter + tag: 0.5.0 + pullPolicy: IfNotPresent + +## Specify entries of `process_names:` in the process-exporter config +## See https://github.com/ncabatoff/process-exporter/tree/master#using-a-config-file +groups: +- name: "{{.Comm}}" + cmdline: + - '.+' + +## Creates a Prometheus Operator ServiceMonitor +serviceMonitor: + enabled: true + interval: "30s" + labels: + app.kubernetes.io/vendor: kubesphere + +service: + type: ClusterIP + port: 9201 + targetPort: 9201 + innerPort: 9202 + nodePort: + annotations: + prometheus.io/scrape: "true" + +# Labels to add to the pod +podLabels: {} + +# Annotations to add to the pod +podAnnotations: {} + +# Set to Kubernetes default updateStrategy by default. +# See https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/#daemonset-update-strategy for +# supported configurations. +updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + imagePullSecrets: [] + # Specifies whether to automount API credentials for the ServiceAccount to the pods + automountServiceAccountToken: true + +securityContext: + runAsNonRoot: true + runAsUser: 65534 + +hostNetwork: false + +rbac: + ## If true, create & use RBAC resources + ## + create: true + ## If true, create & use Pod Security Policy resources + ## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + pspEnabled: false + +# for deployments that have node_exporter deployed outside of the cluster, list +# their addresses here +endpoints: [] + +## Assign a group of affinity scheduling rules +## +affinity: {} +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchFields: +# - key: metadata.name +# operator: In +# values: +# - target-host-name + +## Assign a nodeSelector if operating a hybrid cluster +## +nodeSelector: {} +# beta.kubernetes.io/arch: amd64 +# beta.kubernetes.io/os: linux + +tolerations: + - effect: NoSchedule + operator: Exists + +## Assign a PriorityClassName to pods if set +# priorityClassName: "" + +## Additional container arguments +## +extraArgs: {} +# - --collector.diskstats.ignored-devices=^(ram|loop|fd|(h|s|v)d[a-z]|nvme\\d+n\\d+p)\\d+$ + +## Additional mounts from the host +## +extraHostVolumeMounts: {} +# - name: +# hostPath: +# mountPath: +# readOnly: true|false +# mountPropagation: None|HostToContainer|Bidirectional + +templates: + config.yml: | + process_names: + {{ .Values.groups | toYaml }} + +kubeRbacProxy: + image: kubesphere/kube-rbac-proxy + tag: v0.11.0 + resources: {} \ No newline at end of file diff --git 
a/charts/prometheus-node-exporter/templates/daemonset.yaml b/charts/prometheus-node-exporter/templates/daemonset.yaml index c8a71add18b3..acbaa6d6ee8f 100644 --- a/charts/prometheus-node-exporter/templates/daemonset.yaml +++ b/charts/prometheus-node-exporter/templates/daemonset.yaml @@ -218,6 +218,12 @@ spec: {{ toYaml .Values.kubeRBACProxy.containerSecurityContext | nindent 12 }} {{- end }} {{- end }} + {{- if .Values.ProcessExporter.enabled }} + {{- include "prometheus-process-exporter.daemonset.containers" . | nindent 8 }} + {{- end }} + {{- if .Values.CalicoExporter.enabled }} + {{- include "calico-exporter.daemonset.containers" . | nindent 8 }} + {{- end }} {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} imagePullSecrets: {{- include "prometheus-node-exporter.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.imagePullSecrets) | indent 8 }} @@ -282,3 +288,9 @@ spec: configMap: name: {{ template "prometheus-node-exporter.fullname" . }}-rbac-config {{- end }} + {{- if .Values.ProcessExporter.enabled }} + {{- include "prometheus-process-exporter.daemonset.volumes" . | nindent 8 }} + {{- end }} + {{- if .Values.CalicoExporter.enabled }} + {{- include "calico-exporter.daemonset.volumes" . | nindent 8 }} + {{- end }} diff --git a/charts/prometheus-node-exporter/values.yaml b/charts/prometheus-node-exporter/values.yaml index ea87e100d018..5809290acd87 100644 --- a/charts/prometheus-node-exporter/values.yaml +++ b/charts/prometheus-node-exporter/values.yaml @@ -255,7 +255,7 @@ serviceAccount: create: true # The name of the ServiceAccount to use. 
# If not set and create is true, a name is generated using the fullname template - name: + name: prometheus-node-exporter annotations: {} imagePullSecrets: [] automountServiceAccountToken: false @@ -468,3 +468,246 @@ extraManifests: [] # name: prometheus-extra # data: # extra-data: "value" + +# prometheus process exporter configuration +ProcessExporter: + enabled: false + nameOverride: "prometheus-process-exporter" + + # Default values for prometheus-process-exporter. + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. + image: + repository: ncabatoff/process-exporter + tag: 0.5.0 + pullPolicy: IfNotPresent + + ## Specify entries of `process_names:` in the process-exporter config + ## See https://github.com/ncabatoff/process-exporter/tree/master#using-a-config-file + groups: + - name: "{{.Comm}}" + cmdline: + - '.+' + + ## Creates a Prometheus Operator ServiceMonitor + serviceMonitor: + enabled: true + interval: "30s" + labels: + app.kubernetes.io/vendor: kubesphere + + service: + type: ClusterIP + port: 9201 + targetPort: 9201 + innerPort: 9202 + nodePort: + annotations: + prometheus.io/scrape: "true" + + # Labels to add to the pod + podLabels: + app.kubernetes.io/name: prometheus-node-exporter + + # Annotations to add to the pod + podAnnotations: {} + + # Set to Kubernetes default updateStrategy by default. + # See https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/#daemonset-update-strategy for + # supported configurations. + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + + serviceAccount: + # Specifies whether a ServiceAccount should be created + create: false + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: prometheus-node-exporter + imagePullSecrets: [] + # Specifies whether to automount API credentials for the ServiceAccount to the pods + automountServiceAccountToken: true + + securityContext: + runAsNonRoot: true + runAsUser: 65534 + + hostNetwork: false + + rbac: + ## If true, create & use RBAC resources + ## + create: true + ## If true, create & use Pod Security Policy resources + ## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + pspEnabled: false + + # for deployments that have node_exporter deployed outside of the cluster, list + # their addresses here + endpoints: [] + + ## Assign a group of affinity scheduling rules + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchFields: + # - key: metadata.name + # operator: In + # values: + # - target-host-name + + ## Assign a nodeSelector if operating a hybrid cluster + ## + nodeSelector: {} + # beta.kubernetes.io/arch: amd64 + # beta.kubernetes.io/os: linux + + tolerations: + - effect: NoSchedule + operator: Exists + + ## Assign a PriorityClassName to pods if set + # priorityClassName: "" + + ## Additional container arguments + ## + extraArgs: {} + # - --collector.diskstats.ignored-devices=^(ram|loop|fd|(h|s|v)d[a-z]|nvme\\d+n\\d+p)\\d+$ + + ## Additional mounts from the host + ## + extraHostVolumeMounts: {} + # - name: + # hostPath: + # mountPath: + # readOnly: true|false + # mountPropagation: None|HostToContainer|Bidirectional + + templates: + config.yml: | + process_names: + {{ .Values.groups | toYaml }} + + kubeRbacProxy: + image: kubesphere/kube-rbac-proxy + tag: v0.11.0 + resources: {} + +# calico exporter 
configuration +CalicoExporter: + enabled: false + nameOverride: "calico-exporter" + # Default values for calico-exporter. + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. + image: + repository: kubesphere/calico-exporter + tag: v0.2.0 + pullPolicy: IfNotPresent + + ## Creates a Prometheus Operator ServiceMonitor + serviceMonitor: + scrapeTimeout: "30s" + enabled: true + interval: "30s" + labels: + app.kubernetes.io/vendor: kubesphere + + calicoService: + type: ClusterIP + port: 9094 + targetPort: 9094 + innerPort: 9094 + nodePort: + annotations: + prometheus.io/scrape: "true" + + service: + type: ClusterIP + port: 9094 + targetPort: 9094 + innerPort: 9093 + nodePort: + annotations: + prometheus.io/scrape: "true" + + # Labels to add to the pod + podLabels: + app.kubernetes.io/name: prometheus-node-exporter + # Annotations to add to the pod + podAnnotations: {} + + # Set to Kubernetes default updateStrategy by default. + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: 1 + memory: 500Mi + requests: + cpu: 102m + memory: 180Mi + + serviceAccount: + # The name of the ServiceAccount to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: prometheus-node-exporter + imagePullSecrets: [] + # Specifies whether to automount API credentials for the ServiceAccount to the pods + automountServiceAccountToken: true + + securityContext: + runAsNonRoot: true + runAsUser: 65534 + + hostNetwork: false + + ## Assign a group of affinity scheduling rules + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchFields: + # - key: metadata.name + # operator: In + # values: + # - target-host-name + + ## Assign a nodeSelector if operating a hybrid cluster + ## + nodeSelector: {} + # beta.kubernetes.io/arch: amd64 + # beta.kubernetes.io/os: linux + + tolerations: + - effect: NoSchedule + operator: Exists + + kubeRbacProxy: + image: kubesphere/kube-rbac-proxy + tag: v0.11.0 + resources: {}