# Default values for openshift-etcd-backup.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
backup:
  # -- Sub directory path
  subdir: "/"
  # -- Directory name of single backup
  dirname: "+etcd-backup-%FT%T%:z"
  # -- expiretype could be days (keep backups newer than backup.keepdays),
  # count (keep a number of backups with backup.keepcount),
  # never (do not expire backups, keep all of them)
  expiretype: "days"
  # -- Retention period in days if expiretype is set to days
  keepdays: "30"
  # -- Count retention if expiretype set to count
  keepcount: "10"
  # -- Backup schedule
  schedule: "0 0 * * *"
  # -- Set umask during the backup
  umask: "0027"
persistence:
  nfs:
    # -- Enable nfs backend storage
    enabled: false
    # -- NFS server name or IP
    server: example.com
    # -- NFS server path
    path: "/etcd-backups"
  hostPath:
    # -- Enable hostPath
    enabled: true
    # -- hostPath existing path on host
    path: "/mnt/etcd-backups"
  provisioning:
    # -- Enable provisioned backend storage with default or configured storageClass
    enabled: false
    storageClass: ""
    # -- Define the storage size
    capacity: 10Gi
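    # For example (an untested sketch), requesting a PVC from a specific
    # StorageClass instead of the cluster default could look like:
    #   enabled: true
    #   storageClass: "my-rwx-class"  # hypothetical StorageClass name
    #   capacity: 20Gi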
  s3:
    # -- Enable S3 backend storage
    enabled: false
    # -- S3 endpoint name
    name: etcd-backup
    # -- S3 endpoint host
    host: https://minio.local:9000
    # -- Inject OpenShift CA
    ca:
      enabled: false
    # -- S3 bucket name
    bucket: etcd-backup
    # -- S3 access key
    accessKey: mysuperaccesskey
    # -- S3 secret key
    secretKey: mysupersecretkey
    # -- Use an existing Secret for the S3 credentials instead of creating one
    existingSecret: ""
image:
  # -- Image repository to use
  repository: ghcr.io/adfinis/openshift-etcd-backup
  # -- Image pull policy
  pullPolicy: Always
  # -- Overrides the image tag whose default is the chart appVersion.
  tag: ""
nameOverride: ""
fullnameOverride: ""
serviceAccount:
  # -- Specifies whether a service account should be created
  create: true
  # -- Annotations to add to the service account
  annotations: {}
  # -- The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""
securityContext:
  # -- Run pod as privileged
  privileged: true
  # -- Set user ID
  runAsUser: 0
  # -- Set group ID
  runAsGroup: 0

# -- Configure SecurityContext of the pod started by the job
podSecurityContext: {}
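  # For example (a sketch, not a recommendation), a supplemental group for the
  # backup volume could be set by removing the curly braces above and uncommenting:
  # fsGroup: 1000  # hypothetical group ID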
resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
nodeSelector:
  # -- The backup job should run on masters as etcd runs on them
  node-role.kubernetes.io/master: ""

tolerations:
  # -- Allow jobs to run on masters
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
affinity: {}
annotations: {}
monitoring:
  # -- Deploy PrometheusRule to be alerted in case a backup fails as described [here](https://github.com/adfinis/openshift-etcd-backup/blob/main/etcd-backup-cronjob-monitor.PrometheusRule.yaml).
  # Be sure to have monitoring for user defined projects enabled as [described in the upstream documentation](https://docs.openshift.com/container-platform/4.6/monitoring/enabling-monitoring-for-user-defined-projects.html).
  enabled: false
  rules:
    # -- Deploy PrometheusRule to check for CronJob failures.
    cronjobMonitor: true
    # -- Provide custom recording or alerting rules to be deployed into the cluster.
    additionalRules:
      # rule-name:
      #   groups:
      #     - name: my_group
      #       rules:
      #         - record: my_record
      #           expr: 100 * my_record