diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/.helmignore b/deploy-as-code/helm/charts/backbone-services/jupyterhub/.helmignore new file mode 100644 index 000000000..c784d7ca6 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/.helmignore @@ -0,0 +1,31 @@ +# Anything within the root folder of the Helm chart, where Chart.yaml resides, +# will be embedded into the packaged Helm chart. This is reasonable since only +# when the templates render after the chart has been packaged and distributed, +# will the templates logic evaluate that determines if other files were +# referenced, such as our files/hub/jupyterhub_config.py. +# +# Here are files that we intentionally ignore to avoid them being packaged, +# because we don't want to reference them from our templates anyhow. +values.schema.yaml + +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/Chart.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/Chart.yaml new file mode 100644 index 000000000..180ccf964 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/Chart.yaml @@ -0,0 +1,19 @@ +# Chart.yaml v2 reference: https://helm.sh/docs/topics/charts/#the-chartyaml-file +apiVersion: v2 +name: jupyterhub +version: 0.0.1-set.by.chartpress +appVersion: "4.1.4" +description: Multi-user Jupyter installation +keywords: [jupyter, jupyterhub, z2jh] +home: https://z2jh.jupyter.org +sources: [https://github.com/jupyterhub/zero-to-jupyterhub-k8s] +icon: https://hub.jupyter.org/helm-chart/images/hublogo.svg +kubeVersion: ">=1.23.0-0" +maintainers: + # Since it is a requirement of Artifact Hub to have specific maintainers + # listed, we have added some below, but in practice the entire JupyterHub team + # contributes to the maintenance of this Helm chart. 
+ - name: Erik Sundell + email: erik@sundellopensource.se + - name: Simon Li + url: https://github.com/manics/ diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/files/hub/jupyterhub_config.py b/deploy-as-code/helm/charts/backbone-services/jupyterhub/files/hub/jupyterhub_config.py new file mode 100644 index 000000000..492aa62fc --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/files/hub/jupyterhub_config.py @@ -0,0 +1,500 @@ +# load the config object (satisfies linters) +c = get_config() # noqa + +import glob +import os +import re +import sys + +from jupyterhub.utils import url_path_join +from kubernetes_asyncio import client +from tornado.httpclient import AsyncHTTPClient + +# Make sure that modules placed in the same directory as the jupyterhub config are added to the pythonpath +configuration_directory = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, configuration_directory) + +from z2jh import ( + get_config, + get_name, + get_name_env, + get_secret_value, + set_config_if_not_none, +) + + +def camelCaseify(s): + """convert snake_case to camelCase + + For the common case where some_value is set from someValue + so we don't have to specify the name twice. + """ + return re.sub(r"_([a-z])", lambda m: m.group(1).upper(), s) + + +# Configure JupyterHub to use the curl backend for making HTTP requests, +# rather than the pure-python implementations. The default one starts +# being too slow to make a large number of requests to the proxy API +# at the rate required. +AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") + +c.JupyterHub.spawner_class = "kubespawner.KubeSpawner" + +# Connect to a proxy running in a different pod. 
Note that *_SERVICE_* +# environment variables are set by Kubernetes for Services +c.ConfigurableHTTPProxy.api_url = ( + f'http://{get_name("proxy-api")}:{get_name_env("proxy-api", "_SERVICE_PORT")}' +) +c.ConfigurableHTTPProxy.should_start = False + +# Do not shut down user pods when hub is restarted +c.JupyterHub.cleanup_servers = False + +# Check that the proxy has routes appropriately setup +c.JupyterHub.last_activity_interval = 60 + +# Don't wait at all before redirecting a spawning user to the progress page +c.JupyterHub.tornado_settings = { + "slow_spawn_timeout": 0, +} + + +# configure the hub db connection +db_type = get_config("hub.db.type") +if db_type == "sqlite-pvc": + c.JupyterHub.db_url = "sqlite:///jupyterhub.sqlite" +elif db_type == "sqlite-memory": + c.JupyterHub.db_url = "sqlite://" +else: + set_config_if_not_none(c.JupyterHub, "db_url", "hub.db.url") +db_password = get_secret_value("hub.db.password", None) +if db_password is not None: + if db_type == "mysql": + os.environ["MYSQL_PWD"] = db_password + elif db_type == "postgres": + os.environ["PGPASSWORD"] = db_password + else: + print(f"Warning: hub.db.password is ignored for hub.db.type={db_type}") + + +# c.JupyterHub configuration from Helm chart's configmap +for trait, cfg_key in ( + ("concurrent_spawn_limit", None), + ("active_server_limit", None), + ("base_url", None), + ("allow_named_servers", None), + ("named_server_limit_per_user", None), + ("authenticate_prometheus", None), + ("redirect_to_server", None), + ("shutdown_on_logout", None), + ("template_paths", None), + ("template_vars", None), +): + if cfg_key is None: + cfg_key = camelCaseify(trait) + set_config_if_not_none(c.JupyterHub, trait, "hub." + cfg_key) + +# hub_bind_url configures what the JupyterHub process within the hub pod's +# container should listen to. 
+hub_container_port = 8081 +c.JupyterHub.hub_bind_url = f"http://:{hub_container_port}" + +# hub_connect_url is the URL for connecting to the hub for use by external +# JupyterHub services such as the proxy. Note that *_SERVICE_* environment +# variables are set by Kubernetes for Services. +c.JupyterHub.hub_connect_url = ( + f'http://{get_name("hub")}:{get_name_env("hub", "_SERVICE_PORT")}' +) + +# implement common labels +# this duplicates the jupyterhub.commonLabels helper +common_labels = c.KubeSpawner.common_labels = {} +common_labels["app"] = get_config( + "nameOverride", + default=get_config("Chart.Name", "jupyterhub"), +) +common_labels["heritage"] = "jupyterhub" +chart_name = get_config("Chart.Name") +chart_version = get_config("Chart.Version") +if chart_name and chart_version: + common_labels["chart"] = "{}-{}".format( + chart_name, + chart_version.replace("+", "_"), + ) +release = get_config("Release.Name") +if release: + common_labels["release"] = release + +c.KubeSpawner.namespace = os.environ.get("POD_NAMESPACE", "default") + +# Max number of consecutive failures before the Hub restarts itself +# requires jupyterhub 0.9.2 +set_config_if_not_none( + c.Spawner, + "consecutive_failure_limit", + "hub.consecutiveFailureLimit", +) + +for trait, cfg_key in ( + ("pod_name_template", None), + ("start_timeout", None), + ("image_pull_policy", "image.pullPolicy"), + # ('image_pull_secrets', 'image.pullSecrets'), # Managed manually below + ("events_enabled", "events"), + ("extra_labels", None), + ("extra_annotations", None), + # ("allow_privilege_escalation", None), # Managed manually below + ("uid", None), + ("fs_gid", None), + ("service_account", "serviceAccountName"), + ("storage_extra_labels", "storage.extraLabels"), + # ("tolerations", "extraTolerations"), # Managed manually below + ("node_selector", None), + ("node_affinity_required", "extraNodeAffinity.required"), + ("node_affinity_preferred", "extraNodeAffinity.preferred"), + ("pod_affinity_required", 
"extraPodAffinity.required"), + ("pod_affinity_preferred", "extraPodAffinity.preferred"), + ("pod_anti_affinity_required", "extraPodAntiAffinity.required"), + ("pod_anti_affinity_preferred", "extraPodAntiAffinity.preferred"), + ("lifecycle_hooks", None), + ("init_containers", None), + ("extra_containers", None), + ("mem_limit", "memory.limit"), + ("mem_guarantee", "memory.guarantee"), + ("cpu_limit", "cpu.limit"), + ("cpu_guarantee", "cpu.guarantee"), + ("extra_resource_limits", "extraResource.limits"), + ("extra_resource_guarantees", "extraResource.guarantees"), + ("environment", "extraEnv"), + ("profile_list", None), + ("extra_pod_config", None), +): + if cfg_key is None: + cfg_key = camelCaseify(trait) + set_config_if_not_none(c.KubeSpawner, trait, "singleuser." + cfg_key) + +image = get_config("singleuser.image.name") +if image: + tag = get_config("singleuser.image.tag") + if tag: + image = f"{image}:{tag}" + + c.KubeSpawner.image = image + +# allow_privilege_escalation defaults to False in KubeSpawner 2+. Since its a +# property where None, False, and True all are valid values that users of the +# Helm chart may want to set, we can't use the set_config_if_not_none helper +# function as someone may want to override the default False value to None. +# +c.KubeSpawner.allow_privilege_escalation = get_config( + "singleuser.allowPrivilegeEscalation" +) + +# Combine imagePullSecret.create (single), imagePullSecrets (list), and +# singleuser.image.pullSecrets (list). 
+image_pull_secrets = [] +if get_config("imagePullSecret.automaticReferenceInjection") and get_config( + "imagePullSecret.create" +): + image_pull_secrets.append(get_name("image-pull-secret")) +if get_config("imagePullSecrets"): + image_pull_secrets.extend(get_config("imagePullSecrets")) +if get_config("singleuser.image.pullSecrets"): + image_pull_secrets.extend(get_config("singleuser.image.pullSecrets")) +if image_pull_secrets: + c.KubeSpawner.image_pull_secrets = image_pull_secrets + +# scheduling: +if get_config("scheduling.userScheduler.enabled"): + c.KubeSpawner.scheduler_name = get_name("user-scheduler") +if get_config("scheduling.podPriority.enabled"): + c.KubeSpawner.priority_class_name = get_name("priority") + +# add node-purpose affinity +match_node_purpose = get_config("scheduling.userPods.nodeAffinity.matchNodePurpose") +if match_node_purpose: + node_selector = dict( + matchExpressions=[ + dict( + key="hub.jupyter.org/node-purpose", + operator="In", + values=["user"], + ) + ], + ) + if match_node_purpose == "prefer": + c.KubeSpawner.node_affinity_preferred.append( + dict( + weight=100, + preference=node_selector, + ), + ) + elif match_node_purpose == "require": + c.KubeSpawner.node_affinity_required.append(node_selector) + elif match_node_purpose == "ignore": + pass + else: + raise ValueError( + f"Unrecognized value for matchNodePurpose: {match_node_purpose}" + ) + +# Combine the common tolerations for user pods with singleuser tolerations +scheduling_user_pods_tolerations = get_config("scheduling.userPods.tolerations", []) +singleuser_extra_tolerations = get_config("singleuser.extraTolerations", []) +tolerations = scheduling_user_pods_tolerations + singleuser_extra_tolerations +if tolerations: + c.KubeSpawner.tolerations = tolerations + +# Configure dynamically provisioning pvc +storage_type = get_config("singleuser.storage.type") +if storage_type == "dynamic": + pvc_name_template = get_config("singleuser.storage.dynamic.pvcNameTemplate") + 
c.KubeSpawner.pvc_name_template = pvc_name_template + volume_name_template = get_config("singleuser.storage.dynamic.volumeNameTemplate") + c.KubeSpawner.storage_pvc_ensure = True + set_config_if_not_none( + c.KubeSpawner, "storage_class", "singleuser.storage.dynamic.storageClass" + ) + set_config_if_not_none( + c.KubeSpawner, + "storage_access_modes", + "singleuser.storage.dynamic.storageAccessModes", + ) + set_config_if_not_none( + c.KubeSpawner, "storage_capacity", "singleuser.storage.capacity" + ) + + # Add volumes to singleuser pods + c.KubeSpawner.volumes = [ + { + "name": volume_name_template, + "persistentVolumeClaim": {"claimName": pvc_name_template}, + } + ] + c.KubeSpawner.volume_mounts = [ + { + "mountPath": get_config("singleuser.storage.homeMountPath"), + "name": volume_name_template, + } + ] +elif storage_type == "static": + pvc_claim_name = get_config("singleuser.storage.static.pvcName") + c.KubeSpawner.volumes = [ + {"name": "home", "persistentVolumeClaim": {"claimName": pvc_claim_name}} + ] + + c.KubeSpawner.volume_mounts = [ + { + "mountPath": get_config("singleuser.storage.homeMountPath"), + "name": "home", + "subPath": get_config("singleuser.storage.static.subPath"), + } + ] + +# Inject singleuser.extraFiles as volumes and volumeMounts with data loaded from +# the dedicated k8s Secret prepared to hold the extraFiles actual content. +extra_files = get_config("singleuser.extraFiles", {}) +if extra_files: + volume = { + "name": "files", + } + items = [] + for file_key, file_details in extra_files.items(): + # Each item is a mapping of a key in the k8s Secret to a path in this + # abstract volume, the goal is to enable us to set the mode / + # permissions only though so we don't change the mapping. 
+ item = { + "key": file_key, + "path": file_key, + } + if "mode" in file_details: + item["mode"] = file_details["mode"] + items.append(item) + volume["secret"] = { + "secretName": get_name("singleuser"), + "items": items, + } + c.KubeSpawner.volumes.append(volume) + + volume_mounts = [] + for file_key, file_details in extra_files.items(): + volume_mounts.append( + { + "mountPath": file_details["mountPath"], + "subPath": file_key, + "name": "files", + } + ) + c.KubeSpawner.volume_mounts.extend(volume_mounts) + +# Inject extraVolumes / extraVolumeMounts +c.KubeSpawner.volumes.extend(get_config("singleuser.storage.extraVolumes", [])) +c.KubeSpawner.volume_mounts.extend( + get_config("singleuser.storage.extraVolumeMounts", []) +) + +c.JupyterHub.services = [] +c.JupyterHub.load_roles = [] + +# jupyterhub-idle-culler's permissions are scoped to what it needs only, see +# https://github.com/jupyterhub/jupyterhub-idle-culler#permissions. +# +if get_config("cull.enabled", False): + jupyterhub_idle_culler_role = { + "name": "jupyterhub-idle-culler", + "scopes": [ + "list:users", + "read:users:activity", + "read:servers", + "delete:servers", + # "admin:users", # dynamically added if --cull-users is passed + ], + # assign the role to a jupyterhub service, so it gains these permissions + "services": ["jupyterhub-idle-culler"], + } + + cull_cmd = ["python3", "-m", "jupyterhub_idle_culler"] + base_url = c.JupyterHub.get("base_url", "/") + cull_cmd.append("--url=http://localhost:8081" + url_path_join(base_url, "hub/api")) + + cull_timeout = get_config("cull.timeout") + if cull_timeout: + cull_cmd.append(f"--timeout={cull_timeout}") + + cull_every = get_config("cull.every") + if cull_every: + cull_cmd.append(f"--cull-every={cull_every}") + + cull_concurrency = get_config("cull.concurrency") + if cull_concurrency: + cull_cmd.append(f"--concurrency={cull_concurrency}") + + if get_config("cull.users"): + cull_cmd.append("--cull-users") + 
jupyterhub_idle_culler_role["scopes"].append("admin:users") + + if not get_config("cull.adminUsers"): + cull_cmd.append("--cull-admin-users=false") + + if get_config("cull.removeNamedServers"): + cull_cmd.append("--remove-named-servers") + + cull_max_age = get_config("cull.maxAge") + if cull_max_age: + cull_cmd.append(f"--max-age={cull_max_age}") + + c.JupyterHub.services.append( + { + "name": "jupyterhub-idle-culler", + "command": cull_cmd, + } + ) + c.JupyterHub.load_roles.append(jupyterhub_idle_culler_role) + +for key, service in get_config("hub.services", {}).items(): + # c.JupyterHub.services is a list of dicts, but + # hub.services is a dict of dicts to make the config mergable + service.setdefault("name", key) + + # As the api_token could be exposed in hub.existingSecret, we need to read + # it it from there or fall back to the chart managed k8s Secret's value. + service.pop("apiToken", None) + service["api_token"] = get_secret_value(f"hub.services.{key}.apiToken") + + c.JupyterHub.services.append(service) + +for key, role in get_config("hub.loadRoles", {}).items(): + # c.JupyterHub.load_roles is a list of dicts, but + # hub.loadRoles is a dict of dicts to make the config mergable + role.setdefault("name", key) + + c.JupyterHub.load_roles.append(role) + +# respect explicit null command (distinct from unspecified) +# this avoids relying on KubeSpawner.cmd's default being None +_unspecified = object() +specified_cmd = get_config("singleuser.cmd", _unspecified) +if specified_cmd is not _unspecified: + c.Spawner.cmd = specified_cmd + +set_config_if_not_none(c.Spawner, "default_url", "singleuser.defaultUrl") + +cloud_metadata = get_config("singleuser.cloudMetadata") + +if cloud_metadata.get("blockWithIptables") == True: + # Use iptables to block access to cloud metadata by default + network_tools_image_name = get_config("singleuser.networkTools.image.name") + network_tools_image_tag = get_config("singleuser.networkTools.image.tag") + network_tools_resources = 
get_config("singleuser.networkTools.resources") + ip = cloud_metadata["ip"] + ip_block_container = client.V1Container( + name="block-cloud-metadata", + image=f"{network_tools_image_name}:{network_tools_image_tag}", + command=[ + "iptables", + "--append", + "OUTPUT", + "--protocol", + "tcp", + "--destination", + ip, + "--destination-port", + "80", + "--jump", + "DROP", + ], + security_context=client.V1SecurityContext( + privileged=True, + run_as_user=0, + capabilities=client.V1Capabilities(add=["NET_ADMIN"]), + ), + resources=network_tools_resources, + ) + + c.KubeSpawner.init_containers.append(ip_block_container) + + +if get_config("debug.enabled", False): + c.JupyterHub.log_level = "DEBUG" + c.Spawner.debug = True + +# load potentially seeded secrets +# +# NOTE: ConfigurableHTTPProxy.auth_token is set through an environment variable +# that is set using the chart managed secret. +c.JupyterHub.cookie_secret = get_secret_value("hub.config.JupyterHub.cookie_secret") +# NOTE: CryptKeeper.keys should be a list of strings, but we have encoded as a +# single string joined with ; in the k8s Secret. 
+# +c.CryptKeeper.keys = get_secret_value("hub.config.CryptKeeper.keys").split(";") +c.NotebookApp.terminado_settings = { 'shell_command': ['/bin/bash'] } +c.NotebookApp.allow_origin = '*' +c.NotebookApp.ip = '0.0.0.0' + +# load hub.config values, except potentially seeded secrets already loaded +for app, cfg in get_config("hub.config", {}).items(): + if app == "JupyterHub": + cfg.pop("proxy_auth_token", None) + cfg.pop("cookie_secret", None) + cfg.pop("services", None) + elif app == "ConfigurableHTTPProxy": + cfg.pop("auth_token", None) + elif app == "CryptKeeper": + cfg.pop("keys", None) + c[app].update(cfg) + +# load /usr/local/etc/jupyterhub/jupyterhub_config.d config files +config_dir = "/usr/local/etc/jupyterhub/jupyterhub_config.d" +if os.path.isdir(config_dir): + for file_path in sorted(glob.glob(f"{config_dir}/*.py")): + file_name = os.path.basename(file_path) + print(f"Loading {config_dir} config: {file_name}") + with open(file_path) as f: + file_content = f.read() + # compiling makes debugging easier: https://stackoverflow.com/a/437857 + exec(compile(source=file_content, filename=file_name, mode="exec")) + +# execute hub.extraConfig entries +for key, config_py in sorted(get_config("hub.extraConfig", {}).items()): + print(f"Loading extra config: {key}") + exec(config_py) diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/files/hub/z2jh.py b/deploy-as-code/helm/charts/backbone-services/jupyterhub/files/hub/z2jh.py new file mode 100644 index 000000000..f4d7be699 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/files/hub/z2jh.py @@ -0,0 +1,122 @@ +""" +Utility methods for use in jupyterhub_config.py and dynamic subconfigs. 
+ +Methods here can be imported by extraConfig in values.yaml +""" + +import os +from collections.abc import Mapping +from functools import lru_cache + +import yaml + + +# memoize so we only load config once +@lru_cache +def _load_config(): + """Load the Helm chart configuration used to render the Helm templates of + the chart from a mounted k8s Secret, and merge in values from an optionally + mounted secret (hub.existingSecret).""" + + cfg = {} + for source in ("secret/values.yaml", "existing-secret/values.yaml"): + path = f"/usr/local/etc/jupyterhub/{source}" + if os.path.exists(path): + print(f"Loading {path}") + with open(path) as f: + values = yaml.safe_load(f) + cfg = _merge_dictionaries(cfg, values) + else: + print(f"No config at {path}") + return cfg + + +@lru_cache +def _get_config_value(key): + """Load value from the k8s ConfigMap given a key.""" + + path = f"/usr/local/etc/jupyterhub/config/{key}" + if os.path.exists(path): + with open(path) as f: + return f.read() + else: + raise Exception(f"{path} not found!") + + +@lru_cache +def get_secret_value(key, default="never-explicitly-set"): + """Load value from the user managed k8s Secret or the default k8s Secret + given a key.""" + + for source in ("existing-secret", "secret"): + path = f"/usr/local/etc/jupyterhub/{source}/{key}" + if os.path.exists(path): + with open(path) as f: + return f.read() + if default != "never-explicitly-set": + return default + raise Exception(f"{key} not found in either k8s Secret!") + + +def get_name(name): + """Returns the fullname of a resource given its short name""" + return _get_config_value(name) + + +def get_name_env(name, suffix=""): + """Returns the fullname of a resource given its short name along with a + suffix, converted to uppercase with dashes replaced with underscores. 
This + is useful to reference named services associated environment variables, such + as PROXY_PUBLIC_SERVICE_PORT.""" + env_key = _get_config_value(name) + suffix + env_key = env_key.upper().replace("-", "_") + return os.environ[env_key] + + +def _merge_dictionaries(a, b): + """Merge two dictionaries recursively. + + Simplified From https://stackoverflow.com/a/7205107 + """ + merged = a.copy() + for key in b: + if key in a: + if isinstance(a[key], Mapping) and isinstance(b[key], Mapping): + merged[key] = _merge_dictionaries(a[key], b[key]) + else: + merged[key] = b[key] + else: + merged[key] = b[key] + return merged + + +def get_config(key, default=None): + """ + Find a config item of a given name & return it + + Parses everything as YAML, so lists and dicts are available too + + get_config("a.b.c") returns config['a']['b']['c'] + """ + value = _load_config() + # resolve path in yaml + for level in key.split("."): + if not isinstance(value, dict): + # a parent is a scalar or null, + # can't resolve full path + return default + if level not in value: + return default + else: + value = value[level] + return value + + +def set_config_if_not_none(cparent, name, key): + """ + Find a config item of a given name, set the corresponding Jupyter + configuration item if not None + """ + data = get_config(key) + if data is not None: + setattr(cparent, name, data) diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/_helpers-names.tpl b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/_helpers-names.tpl new file mode 100644 index 000000000..8b5e6b60c --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/_helpers-names.tpl @@ -0,0 +1,313 @@ +{{- /* + These helpers encapsulates logic on how we name resources. They also enable + parent charts to reference these dynamic resource names. 
+ + To avoid duplicating documentation, for more information, please see the + fullnameOverride entry in values.schema.yaml or the configuration reference + that values.schema.yaml renders to. + + https://z2jh.jupyter.org/en/latest/resources/reference.html#fullnameOverride +*/}} + + + +{{- /* + Utility templates +*/}} + +{{- /* + Renders to a prefix for the chart's resource names. This prefix is assumed to + make the resource name cluster unique. +*/}} +{{- define "jupyterhub.fullname" -}} + {{- /* + We have implemented a trick to allow a parent chart depending on this + chart to call these named templates. + + Caveats and notes: + + 1. While parent charts can reference these, grandparent charts can't. + 2. Parent charts must not use an alias for this chart. + 3. There is no failsafe workaround to above due to + https://github.com/helm/helm/issues/9214. + 4. .Chart is of its own type (*chart.Metadata) and needs to be cast + using "toYaml | fromYaml" in order to be able to use normal helm + template functions on it. + */}} + {{- $envOverrides := index .Values (tpl (default .Chart.Name .Values.name) .) -}} + {{- $baseValues := .Values | deepCopy -}} + {{- $values := dict "Values" (mustMergeOverwrite $baseValues $envOverrides) -}} + {{- with mustMergeOverwrite . 
$values -}} + {{- end -}} + {{- $fullname_override := .Values.fullnameOverride }} + {{- $name_override := .Values.nameOverride }} + {{- if ne .Chart.Name "jupyterhub" }} + {{- if .Values.jupyterhub }} + {{- $fullname_override = .Values.jupyterhub.fullnameOverride }} + {{- $name_override = .Values.jupyterhub.nameOverride }} + {{- end }} + {{- end }} + + {{- if eq (typeOf $fullname_override) "string" }} + {{- $fullname_override }} + {{- else }} + {{- $name := $name_override | default .Chart.Name }} + {{- if contains $name .Release.Name }} + {{- .Release.Name }} + {{- else }} + {{- .Release.Name }}-{{ $name }} + {{- end }} + {{- end }} +{{- end }} + +{{- /* + Renders to a blank string or if the fullname template is truthy renders to it + with an appended dash. +*/}} +{{- define "jupyterhub.fullname.dash" -}} + {{- if (include "jupyterhub.fullname" .) }} + {{- include "jupyterhub.fullname" . }}- + {{- end }} +{{- end }} + + + +{{- /* + Namespaced resources +*/}} + +{{- /* hub Deployment */}} +{{- define "jupyterhub.hub.fullname" -}} + {{- include "jupyterhub.fullname.dash" . }}hub +{{- end }} + +{{- /* hub-serviceaccount ServiceAccount */}} +{{- define "jupyterhub.hub-serviceaccount.fullname" -}} + {{- if .Values.hub.serviceAccount.create }} + {{- .Values.hub.serviceAccount.name | default (include "jupyterhub.hub.fullname" .) }} + {{- else }} + {{- .Values.hub.serviceAccount.name | default "default" }} + {{- end }} +{{- end }} + +{{- /* hub-existing-secret Secret */}} +{{- define "jupyterhub.hub-existing-secret.fullname" -}} + {{- /* A hack to avoid issues from invoking this from a parent Helm chart. 
*/}} + {{- $existing_secret := .Values.hub.existingSecret }} + {{- if ne .Chart.Name "jupyterhub" }} + {{- if .Values.jupyterhub }} + {{- $existing_secret = .Values.jupyterhub.hub.existingSecret }} + {{- end }} + {{- end }} + {{- if $existing_secret }} + {{- $existing_secret }} + {{- end }} +{{- end }} + +{{- /* hub-existing-secret-or-default Secret */}} +{{- define "jupyterhub.hub-existing-secret-or-default.fullname" -}} + {{- include "jupyterhub.hub-existing-secret.fullname" . | default (include "jupyterhub.hub.fullname" .) }} +{{- end }} + +{{- /* hub PVC */}} +{{- define "jupyterhub.hub-pvc.fullname" -}} + {{- include "jupyterhub.hub.fullname" . }}-db-dir +{{- end }} + +{{- /* proxy Deployment */}} +{{- define "jupyterhub.proxy.fullname" -}} + {{- include "jupyterhub.fullname.dash" . }}proxy +{{- end }} + +{{- /* proxy-api Service */}} +{{- define "jupyterhub.proxy-api.fullname" -}} + {{- include "jupyterhub.proxy.fullname" . }}-api +{{- end }} + +{{- /* proxy-http Service */}} +{{- define "jupyterhub.proxy-http.fullname" -}} + {{- include "jupyterhub.proxy.fullname" . }}-http +{{- end }} + +{{- /* proxy-public Service */}} +{{- define "jupyterhub.proxy-public.fullname" -}} + {{- include "jupyterhub.proxy.fullname" . }}-public +{{- end }} + +{{- /* proxy-public-tls Secret */}} +{{- define "jupyterhub.proxy-public-tls.fullname" -}} + {{- include "jupyterhub.proxy-public.fullname" . }}-tls-acme +{{- end }} + +{{- /* proxy-public-manual-tls Secret */}} +{{- define "jupyterhub.proxy-public-manual-tls.fullname" -}} + {{- include "jupyterhub.proxy-public.fullname" . }}-manual-tls +{{- end }} + +{{- /* autohttps Deployment */}} +{{- define "jupyterhub.autohttps.fullname" -}} + {{- include "jupyterhub.fullname.dash" . 
}}autohttps +{{- end }} + +{{- /* autohttps-serviceaccount ServiceAccount */}} +{{- define "jupyterhub.autohttps-serviceaccount.fullname" -}} + {{- if .Values.proxy.traefik.serviceAccount.create }} + {{- .Values.proxy.traefik.serviceAccount.name | default (include "jupyterhub.autohttps.fullname" .) }} + {{- else }} + {{- .Values.proxy.traefik.serviceAccount.name | default "default" }} + {{- end }} +{{- end }} + +{{- /* user-scheduler Deployment */}} +{{- define "jupyterhub.user-scheduler-deploy.fullname" -}} + {{- include "jupyterhub.fullname.dash" . }}user-scheduler +{{- end }} + +{{- /* user-scheduler-serviceaccount ServiceAccount */}} +{{- define "jupyterhub.user-scheduler-serviceaccount.fullname" -}} + {{- if .Values.scheduling.userScheduler.serviceAccount.create }} + {{- .Values.scheduling.userScheduler.serviceAccount.name | default (include "jupyterhub.user-scheduler-deploy.fullname" .) }} + {{- else }} + {{- .Values.scheduling.userScheduler.serviceAccount.name | default "default" }} + {{- end }} +{{- end }} + +{{- /* user-scheduler leader election lock resource */}} +{{- define "jupyterhub.user-scheduler-lock.fullname" -}} + {{- include "jupyterhub.user-scheduler-deploy.fullname" . }}-lock +{{- end }} + +{{- /* user-placeholder StatefulSet */}} +{{- define "jupyterhub.user-placeholder.fullname" -}} + {{- include "jupyterhub.fullname.dash" . }}user-placeholder +{{- end }} + +{{- /* image-awaiter Job */}} +{{- define "jupyterhub.hook-image-awaiter.fullname" -}} + {{- include "jupyterhub.fullname.dash" . }}hook-image-awaiter +{{- end }} + +{{- /* image-awaiter-serviceaccount ServiceAccount */}} +{{- define "jupyterhub.hook-image-awaiter-serviceaccount.fullname" -}} + {{- if .Values.prePuller.hook.serviceAccount.create }} + {{- .Values.prePuller.hook.serviceAccount.name | default (include "jupyterhub.hook-image-awaiter.fullname" .) 
}} + {{- else }} + {{- .Values.prePuller.hook.serviceAccount.name | default "default" }} + {{- end }} +{{- end }} + +{{- /* hook-image-puller DaemonSet */}} +{{- define "jupyterhub.hook-image-puller.fullname" -}} + {{- include "jupyterhub.fullname.dash" . }}hook-image-puller +{{- end }} + +{{- /* continuous-image-puller DaemonSet */}} +{{- define "jupyterhub.continuous-image-puller.fullname" -}} + {{- include "jupyterhub.fullname.dash" . }}continuous-image-puller +{{- end }} + +{{- /* singleuser NetworkPolicy */}} +{{- define "jupyterhub.singleuser.fullname" -}} + {{- include "jupyterhub.fullname.dash" . }}singleuser +{{- end }} + +{{- /* image-pull-secret Secret */}} +{{- define "jupyterhub.image-pull-secret.fullname" -}} + {{- include "jupyterhub.fullname.dash" . }}image-pull-secret +{{- end }} + +{{- /* Ingress */}} +{{- define "jupyterhub.ingress.fullname" -}} + {{- if (include "jupyterhub.fullname" .) }} + {{- include "jupyterhub.fullname" . }} + {{- else -}} + jupyterhub + {{- end }} +{{- end }} + + + +{{- /* + Cluster wide resources + + We enforce uniqueness of names for our cluster wide resources. We assume that + the prefix from setting fullnameOverride to null or a string will be cluster + unique. +*/}} + +{{- /* Priority */}} +{{- define "jupyterhub.priority.fullname" -}} + {{- if (include "jupyterhub.fullname" .) }} + {{- include "jupyterhub.fullname" . }} + {{- else }} + {{- .Release.Name }}-default-priority + {{- end }} +{{- end }} + +{{- /* user-placeholder Priority */}} +{{- define "jupyterhub.user-placeholder-priority.fullname" -}} + {{- if (include "jupyterhub.fullname" .) }} + {{- include "jupyterhub.user-placeholder.fullname" . }} + {{- else }} + {{- .Release.Name }}-user-placeholder-priority + {{- end }} +{{- end }} + +{{- /* image-puller Priority */}} +{{- define "jupyterhub.image-puller-priority.fullname" -}} + {{- if (include "jupyterhub.fullname" .) }} + {{- include "jupyterhub.fullname.dash" . 
}}image-puller + {{- else }} + {{- .Release.Name }}-image-puller-priority + {{- end }} +{{- end }} + +{{- /* user-scheduler's registered name */}} +{{- define "jupyterhub.user-scheduler.fullname" -}} + {{- if (include "jupyterhub.fullname" .) }} + {{- include "jupyterhub.user-scheduler-deploy.fullname" . }} + {{- else }} + {{- .Release.Name }}-user-scheduler + {{- end }} +{{- end }} + + + +{{- /* + A template to render all the named templates in this file for use in the + hub's ConfigMap. + + It is important we keep this in sync with the available templates. +*/}} +{{- define "jupyterhub.name-templates" -}} +fullname: {{ include "jupyterhub.fullname" . | quote }} +fullname-dash: {{ include "jupyterhub.fullname.dash" . | quote }} +hub: {{ include "jupyterhub.hub.fullname" . | quote }} +hub-serviceaccount: {{ include "jupyterhub.hub-serviceaccount.fullname" . | quote }} +hub-existing-secret: {{ include "jupyterhub.hub-existing-secret.fullname" . | quote }} +hub-existing-secret-or-default: {{ include "jupyterhub.hub-existing-secret-or-default.fullname" . | quote }} +hub-pvc: {{ include "jupyterhub.hub-pvc.fullname" . | quote }} +proxy: {{ include "jupyterhub.proxy.fullname" . | quote }} +proxy-api: {{ include "jupyterhub.proxy-api.fullname" . | quote }} +proxy-http: {{ include "jupyterhub.proxy-http.fullname" . | quote }} +proxy-public: {{ include "jupyterhub.proxy-public.fullname" . | quote }} +proxy-public-tls: {{ include "jupyterhub.proxy-public-tls.fullname" . | quote }} +proxy-public-manual-tls: {{ include "jupyterhub.proxy-public-manual-tls.fullname" . | quote }} +autohttps: {{ include "jupyterhub.autohttps.fullname" . | quote }} +autohttps-serviceaccount: {{ include "jupyterhub.autohttps-serviceaccount.fullname" . | quote }} +user-scheduler-deploy: {{ include "jupyterhub.user-scheduler-deploy.fullname" . | quote }} +user-scheduler-serviceaccount: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . 
| quote }} +user-scheduler-lock: {{ include "jupyterhub.user-scheduler-lock.fullname" . | quote }} +user-placeholder: {{ include "jupyterhub.user-placeholder.fullname" . | quote }} +image-puller-priority: {{ include "jupyterhub.image-puller-priority.fullname" . | quote }} +hook-image-awaiter: {{ include "jupyterhub.hook-image-awaiter.fullname" . | quote }} +hook-image-awaiter-serviceaccount: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . | quote }} +hook-image-puller: {{ include "jupyterhub.hook-image-puller.fullname" . | quote }} +continuous-image-puller: {{ include "jupyterhub.continuous-image-puller.fullname" . | quote }} +singleuser: {{ include "jupyterhub.singleuser.fullname" . | quote }} +image-pull-secret: {{ include "jupyterhub.image-pull-secret.fullname" . | quote }} +ingress: {{ include "jupyterhub.ingress.fullname" . | quote }} +priority: {{ include "jupyterhub.priority.fullname" . | quote }} +user-placeholder-priority: {{ include "jupyterhub.user-placeholder-priority.fullname" . | quote }} +user-scheduler: {{ include "jupyterhub.user-scheduler.fullname" . | quote }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/_helpers-netpol.tpl b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/_helpers-netpol.tpl new file mode 100644 index 000000000..006f63322 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/_helpers-netpol.tpl @@ -0,0 +1,101 @@ +{{- /* + This named template renders egress rules for NetworkPolicy resources based on + common configuration. + + It is rendering based on the `egressAllowRules` and `egress` keys of the + passed networkPolicy config object. Each flag set to true under + `egressAllowRules` is rendered to a egress rule that next to any custom user + defined rules from the `egress` config. + + This named template needs to render based on a specific networkPolicy + resource, but also needs access to the root context. 
Due to that, it + accepts a list as its scope, where the first element is supposed to be the + root context and the second element is supposed to be the networkPolicy + configuration object. + + As an example, this is how you would render this named template from a + NetworkPolicy resource under its egress: + + egress: + # other rules here... + + {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.hub.networkPolicy)) }} + {{- . | nindent 4 }} + {{- end }} + + Note that the reference to privateIPs and nonPrivateIPs relate to + https://en.wikipedia.org/wiki/Private_network#Private_IPv4_addresses. +*/}} + +{{- define "jupyterhub.networkPolicy.renderEgressRules" -}} +{{- $root := index . 0 }} +{{- $netpol := index . 1 }} +{{- if or (or $netpol.egressAllowRules.dnsPortsCloudMetadataServer $netpol.egressAllowRules.dnsPortsKubeSystemNamespace) $netpol.egressAllowRules.dnsPortsPrivateIPs }} +- ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + to: + {{- if $netpol.egressAllowRules.dnsPortsCloudMetadataServer }} + # Allow outbound connections to DNS ports on the cloud metadata server + - ipBlock: + cidr: {{ $root.Values.singleuser.cloudMetadata.ip }}/32 + {{- end }} + {{- if $netpol.egressAllowRules.dnsPortsKubeSystemNamespace }} + # Allow outbound connections to DNS ports on pods in the kube-system + # namespace + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + {{- end }} + {{- if $netpol.egressAllowRules.dnsPortsPrivateIPs }} + # Allow outbound connections to DNS ports on destinations in the private IP + # ranges + - ipBlock: + cidr: 10.0.0.0/8 + - ipBlock: + cidr: 172.16.0.0/12 + - ipBlock: + cidr: 192.168.0.0/16 + {{- end }} +{{- end }} + +{{- if $netpol.egressAllowRules.nonPrivateIPs }} +# Allow outbound connections to non-private IP ranges +- to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + # As part of this rule: + # - don't allow outbound connections to private IPs + - 10.0.0.0/8 + - 
172.16.0.0/12 + - 192.168.0.0/16 + # - don't allow outbound connections to the cloud metadata server + - {{ $root.Values.singleuser.cloudMetadata.ip }}/32 +{{- end }} + +{{- if $netpol.egressAllowRules.privateIPs }} +# Allow outbound connections to private IP ranges +- to: + - ipBlock: + cidr: 10.0.0.0/8 + - ipBlock: + cidr: 172.16.0.0/12 + - ipBlock: + cidr: 192.168.0.0/16 +{{- end }} + +{{- if $netpol.egressAllowRules.cloudMetadataServer }} +# Allow outbound connections to the cloud metadata server +- to: + - ipBlock: + cidr: {{ $root.Values.singleuser.cloudMetadata.ip }}/32 +{{- end }} + +{{- with $netpol.egress }} +# Allow outbound connections based on user specified rules +{{ . | toYaml }} +{{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/_helpers.tpl b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/_helpers.tpl new file mode 100644 index 000000000..6ed2f4212 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/_helpers.tpl @@ -0,0 +1,397 @@ +{{- /* + ## About + This file contains helpers to systematically name, label and select Kubernetes + objects we define in the .yaml template files. + + + ## How helpers work + Helm helper functions is a good way to avoid repeating something. They will + generate some output based on one single dictionary of input that we call the + helpers scope. When you are in helm, you access your current scope with a + single a single punctuation (.). + + When you ask a helper to render its content, one often forward the current + scope to the helper in order to allow it to access .Release.Name, + .Values.rbac.create and similar values. + + #### Example - Passing the current scope + {{ include "jupyterhub.commonLabels" . }} + + It would be possible to pass something specific instead of the current scope + (.), but that would make .Release.Name etc. inaccessible by the helper which + is something we aim to avoid. 
+ + #### Example - Passing a new scope + {{ include "demo.bananaPancakes" (dict "pancakes" 5 "bananas" 3) }} + + To let a helper access the current scope along with additional values we have + opted to create a dictionary containing additional values that is then populated + with additional values from the current scope through the merge function. + + #### Example - Passing a new scope augmented with the old + {{- $_ := merge (dict "appLabel" "kube-lego") . }} + {{- include "jupyterhub.matchLabels" $_ | nindent 6 }} + + In this way, the code within the definition of `jupyterhub.matchLabels` will + be able to access .Release.Name and .appLabel. + + NOTE: + The ordering of merge is crucial, the latter argument is merged into the + former. So if you would swap the order you would influence the current scope + risking unintentional behavior. Therefore, always put the fresh unreferenced + dictionary (dict "key1" "value1") first and the current scope (.) last. + + + ## Declared helpers + - appLabel | + - componentLabel | + - commonLabels | uses appLabel + - labels | uses commonLabels + - matchLabels | uses labels + - podCullerSelector | uses matchLabels + + + ## Example usage + ```yaml + # Excerpt from proxy/autohttps/deployment.yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: {{ include "jupyterhub.autohttps.fullname" . }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} + spec: + selector: + matchLabels: + {{- include "jupyterhub.matchLabels" $_ | nindent 6 }} + template: + metadata: + labels: + {{- include "jupyterhub.labels" $_ | nindent 8 }} + hub.jupyter.org/network-access-proxy-http: "true" + ``` + + NOTE: + The "jupyterhub.matchLabels" and "jupyterhub.labels" are passed an augmented + scope that will influence the helpers' behavior. It gets the current scope + "." but merged with a dictionary containing extra key/value pairs. In this + case the "." 
scope was merged with a small dictionary containing only one + key/value pair "appLabel: kube-lego". It is required for kube-lego to + function properly. It is a way to override the default app label's value. +*/}} + + +{{- /* + jupyterhub.appLabel: + Used by "jupyterhub.labels". +*/}} +{{- define "jupyterhub.appLabel" -}} +{{ .Values.nameOverride | default .Chart.Name | trunc 63 | trimSuffix "-" }} +{{- end }} + + +{{- /* + jupyterhub.componentLabel: + Used by "jupyterhub.labels". + + NOTE: The component label is determined by either... + - 1: The provided scope's .componentLabel + - 2: The template's filename if living in the root folder + - 3: The template parent folder's name + - : ...and is combined with .componentPrefix and .componentSuffix +*/}} +{{- define "jupyterhub.componentLabel" -}} +{{- $file := .Template.Name | base | trimSuffix ".yaml" -}} +{{- $parent := .Template.Name | dir | base | trimPrefix "templates" -}} +{{- $component := .componentLabel | default $parent | default $file -}} +{{- $component := print (.componentPrefix | default "") $component (.componentSuffix | default "") -}} +{{ $component }} +{{- end }} + + +{{- /* + jupyterhub.commonLabels: + Foundation for "jupyterhub.labels". + Provides labels: app, release, (chart and heritage). +*/}} +{{- define "jupyterhub.commonLabels" -}} +app: {{ .appLabel | default (include "jupyterhub.appLabel" .) }} +{{- end }} + + +{{- /* + jupyterhub.labels: + Provides labels: component, app, release, (chart and heritage). +*/}} +{{- define "jupyterhub.labels" -}} +component: {{ include "jupyterhub.componentLabel" . }} +{{ include "jupyterhub.commonLabels" . }} +{{- end }} + + +{{- /* + jupyterhub.matchLabels: + Used to provide pod selection labels: component, app, release. +*/}} +{{- define "jupyterhub.matchLabels" -}} +{{- $_ := merge (dict "matchLabels" true) . 
-}} +{{ include "jupyterhub.labels" $_ }} +{{- end }} + + +{{- /* + jupyterhub.dockerconfigjson: + Creates a base64 encoded docker registry json blob for use in a image pull + secret, just like the `kubectl create secret docker-registry` command does + for the generated secrets data.dockerconfigjson field. The output is + verified to be exactly the same even if you have a password spanning + multiple lines as you may need to use a private GCR registry. + + - https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod +*/}} +{{- define "jupyterhub.dockerconfigjson" -}} +{{ include "jupyterhub.dockerconfigjson.yaml" . | b64enc }} +{{- end }} + +{{- define "jupyterhub.dockerconfigjson.yaml" -}} +{{- with .Values.imagePullSecret -}} +{ + "auths": { + {{ .registry | default "https://index.docker.io/v1/" | quote }}: { + "username": {{ .username | quote }}, + "password": {{ .password | quote }}, + {{- if .email }} + "email": {{ .email | quote }}, + {{- end }} + "auth": {{ (print .username ":" .password) | b64enc | quote }} + } + } +} +{{- end }} +{{- end }} + +{{- /* + jupyterhub.imagePullSecrets + Augments passed .pullSecrets with $.Values.imagePullSecrets +*/}} +{{- define "jupyterhub.imagePullSecrets" -}} + {{- /* + We have implemented a trick to allow a parent chart depending on this + chart to call this named templates. + + Caveats and notes: + + 1. While parent charts can reference these, grandparent charts can't. + 2. Parent charts must not use an alias for this chart. + 3. There is no failsafe workaround to above due to + https://github.com/helm/helm/issues/9214. + 4. .Chart is of its own type (*chart.Metadata) and needs to be casted + using "toYaml | fromYaml" in order to be able to use normal helm + template functions on it. 
+ */}} + {{- $jupyterhub_values := .root.Values }} + {{- if ne .root.Chart.Name "jupyterhub" }} + {{- if .root.Values.jupyterhub }} + {{- $jupyterhub_values = .root.Values.jupyterhub }} + {{- end }} + {{- end }} + + {{- /* Populate $_.list with all relevant entries */}} + {{- $_ := dict "list" (concat .image.pullSecrets $jupyterhub_values.imagePullSecrets | uniq) }} + {{- if and $jupyterhub_values.imagePullSecret.create $jupyterhub_values.imagePullSecret.automaticReferenceInjection }} + {{- $__ := set $_ "list" (append $_.list (include "jupyterhub.image-pull-secret.fullname" .root) | uniq) }} + {{- end }} + + {{- /* Decide if something should be written */}} + {{- if not (eq ($_.list | toJson) "[]") }} + + {{- /* Process the $_.list where strings become dicts with a name key and the + strings become the name keys' values into $_.res */}} + {{- $_ := set $_ "res" list }} + {{- range $_.list }} + {{- if eq (typeOf .) "string" }} + {{- $__ := set $_ "res" (append $_.res (dict "name" .)) }} + {{- else }} + {{- $__ := set $_ "res" (append $_.res .) }} + {{- end }} + {{- end }} + + {{- /* Write the results */}} + {{- $_.res | toJson }} + + {{- end }} +{{- end }} + +{{- /* + jupyterhub.singleuser.resources: + The resource request of a singleuser. 
+*/}} +{{- define "jupyterhub.singleuser.resources" -}} +{{- $r1 := .Values.singleuser.cpu.guarantee -}} +{{- $r2 := .Values.singleuser.memory.guarantee -}} +{{- $r3 := .Values.singleuser.extraResource.guarantees -}} +{{- $r := or $r1 $r2 $r3 -}} +{{- $l1 := .Values.singleuser.cpu.limit -}} +{{- $l2 := .Values.singleuser.memory.limit -}} +{{- $l3 := .Values.singleuser.extraResource.limits -}} +{{- $l := or $l1 $l2 $l3 -}} +{{- if $r -}} +requests: + {{- if $r1 }} + cpu: {{ .Values.singleuser.cpu.guarantee }} + {{- end }} + {{- if $r2 }} + memory: {{ .Values.singleuser.memory.guarantee }} + {{- end }} + {{- if $r3 }} + {{- range $key, $value := .Values.singleuser.extraResource.guarantees }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} +{{- end }} + +{{- if $l }} +limits: + {{- if $l1 }} + cpu: {{ .Values.singleuser.cpu.limit }} + {{- end }} + {{- if $l2 }} + memory: {{ .Values.singleuser.memory.limit }} + {{- end }} + {{- if $l3 }} + {{- range $key, $value := .Values.singleuser.extraResource.limits }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} + +{{- /* + jupyterhub.extraEnv: + Output YAML formatted EnvVar entries for use in a containers env field. +*/}} +{{- define "jupyterhub.extraEnv" -}} +{{- include "jupyterhub.extraEnv.withTrailingNewLine" . | trimSuffix "\n" }} +{{- end }} + +{{- define "jupyterhub.extraEnv.withTrailingNewLine" -}} +{{- if . }} +{{- /* If extraEnv is a list, we inject it as it is. */}} +{{- if eq (typeOf .) "[]interface {}" }} +{{- . | toYaml }} + +{{- /* If extraEnv is a map, we differentiate two cases: */}} +{{- else if eq (typeOf .) "map[string]interface {}" }} +{{- range $key, $value := . }} +{{- /* + - If extraEnv.someKey has a map value, then we add the value as a YAML + parsed list element and use the key as the name value unless its + explicitly set. 
+*/}} +{{- if eq (typeOf $value) "map[string]interface {}" }} +{{- merge (dict) $value (dict "name" $key) | list | toYaml | println }} +{{- /* + - If extraEnv.someKey has a string value, then we use the key as the + environment variable name for the value. +*/}} +{{- else if eq (typeOf $value) "string" -}} +- name: {{ $key | quote }} + value: {{ $value | quote | println }} +{{- else }} +{{- printf "?.extraEnv.%s had an unexpected type (%s)" $key (typeOf $value) | fail }} +{{- end }} +{{- end }} {{- /* end of range */}} +{{- end }} +{{- end }} {{- /* end of: if . */}} +{{- end }} {{- /* end of definition */}} + +{{- /* + jupyterhub.extraFiles.data: + Renders content for a k8s Secret's data field, coming from extraFiles with + binaryData entries. +*/}} +{{- define "jupyterhub.extraFiles.data.withNewLineSuffix" -}} + {{- range $file_key, $file_details := . }} + {{- include "jupyterhub.extraFiles.validate-file" (list $file_key $file_details) }} + {{- if $file_details.binaryData }} + {{- $file_key | quote }}: {{ $file_details.binaryData | nospace | quote }}{{ println }} + {{- end }} + {{- end }} +{{- end }} +{{- define "jupyterhub.extraFiles.data" -}} + {{- include "jupyterhub.extraFiles.data.withNewLineSuffix" . | trimSuffix "\n" }} +{{- end }} + +{{- /* + jupyterhub.extraFiles.stringData: + Renders content for a k8s Secret's stringData field, coming from extraFiles + with either data or stringData entries. +*/}} +{{- define "jupyterhub.extraFiles.stringData.withNewLineSuffix" -}} + {{- range $file_key, $file_details := . 
}} + {{- include "jupyterhub.extraFiles.validate-file" (list $file_key $file_details) }} + {{- $file_name := $file_details.mountPath | base }} + {{- if $file_details.stringData }} + {{- $file_key | quote }}: | + {{- $file_details.stringData | trimSuffix "\n" | nindent 2 }}{{ println }} + {{- end }} + {{- if $file_details.data }} + {{- $file_key | quote }}: | + {{- if or (eq (ext $file_name) ".yaml") (eq (ext $file_name) ".yml") }} + {{- $file_details.data | toYaml | nindent 2 }}{{ println }} + {{- else if eq (ext $file_name) ".json" }} + {{- $file_details.data | toJson | nindent 2 }}{{ println }} + {{- else if eq (ext $file_name) ".toml" }} + {{- $file_details.data | toToml | trimSuffix "\n" | nindent 2 }}{{ println }} + {{- else }} + {{- print "\n\nextraFiles entries with 'data' (" $file_key " > " $file_details.mountPath ") needs to have a filename extension of .yaml, .yml, .json, or .toml!" | fail }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- define "jupyterhub.extraFiles.stringData" -}} + {{- include "jupyterhub.extraFiles.stringData.withNewLineSuffix" . | trimSuffix "\n" }} +{{- end }} + +{{- define "jupyterhub.extraFiles.validate-file" -}} + {{- $file_key := index . 0 }} + {{- $file_details := index . 1 }} + + {{- /* Use of mountPath. */}} + {{- if not ($file_details.mountPath) }} + {{- print "\n\nextraFiles entries (" $file_key ") must contain the field 'mountPath'." | fail }} + {{- end }} + + {{- /* Use one of stringData, binaryData, data. */}} + {{- $field_count := 0 }} + {{- if $file_details.data }} + {{- $field_count = add1 $field_count }} + {{- end }} + {{- if $file_details.stringData }} + {{- $field_count = add1 $field_count }} + {{- end }} + {{- if $file_details.binaryData }} + {{- $field_count = add1 $field_count }} + {{- end }} + {{- if ne $field_count 1 }} + {{- print "\n\nextraFiles entries (" $file_key ") must only contain one of the fields: 'data', 'stringData', and 'binaryData'." 
| fail }} + {{- end }} +{{- end }} + +{{- /* + jupyterhub.chart-version-to-git-ref: + Renders a valid git reference from a chartpress generated version string. + In practice, either a git tag or a git commit hash will be returned. + + - The version string will follow a chartpress pattern, see + https://github.com/jupyterhub/chartpress#examples-chart-versions-and-image-tags. + + - The regexReplaceAll function is a sprig library function, see + https://masterminds.github.io/sprig/strings.html. + + - The regular expression is in golang syntax, but \d had to become \\d for + example. +*/}} +{{- define "jupyterhub.chart-version-to-git-ref" -}} +{{- regexReplaceAll ".*[.-]n\\d+[.]h(.*)" . "${1}" }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/_helpers-passwords.tpl b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/_helpers-passwords.tpl new file mode 100644 index 000000000..83edf70bd --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/_helpers-passwords.tpl @@ -0,0 +1,92 @@ +{{- /* + This file contains logic to lookup already + generated passwords or generate a new. + + proxy.secretToken / hub.config.ConfigurableHTTPProxy.auth_token + hub.cookieSecret / hub.config.JupyterHub.cookie_secret + auth.state.cryptoKey* / hub.config.CryptKeeper.keys + + *Note that the entire auth section is deprecated and users + are forced through "fail" in NOTES.txt to migrate to hub.config. + + Note that lookup logic returns falsy value when run with + `helm diff upgrade`, so it is a bit troublesome to test. +*/}} + +{{- /* + Returns given number of random Hex characters. + + - randNumeric 4 | atoi generates a random number in [0, 10^4) + This is a range range evenly divisble by 16, but even if off by one, + that last partial interval offsetting randomness is only 1 part in 625. 
+ - mod N 16 maps to the range 0-15 + - printf "%x" represents a single number 0-15 as a single hex character +*/}} +{{- define "jupyterhub.randHex" -}} + {{- $result := "" }} + {{- range $i := until . }} + {{- $rand_hex_char := mod (randNumeric 4 | atoi) 16 | printf "%x" }} + {{- $result = print $result $rand_hex_char }} + {{- end }} + {{- $result }} +{{- end }} + +{{- define "jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token" -}} + {{- if (.Values.hub.config | dig "ConfigurableHTTPProxy" "auth_token" "") }} + {{- .Values.hub.config.ConfigurableHTTPProxy.auth_token }} + {{- else if .Values.proxy.secretToken }} + {{- .Values.proxy.secretToken }} + {{- else }} + {{- $k8s_state := lookup "v1" "Secret" .Release.Namespace (include "jupyterhub.hub.fullname" .) | default (dict "data" (dict)) }} + {{- if hasKey $k8s_state.data "hub.config.ConfigurableHTTPProxy.auth_token" }} + {{- index $k8s_state.data "hub.config.ConfigurableHTTPProxy.auth_token" | b64dec }} + {{- else }} + {{- randAlphaNum 64 }} + {{- end }} + {{- end }} +{{- end }} + +{{- define "jupyterhub.hub.config.JupyterHub.cookie_secret" -}} + {{- if (.Values.hub.config | dig "JupyterHub" "cookie_secret" "") }} + {{- .Values.hub.config.JupyterHub.cookie_secret }} + {{- else if .Values.hub.cookieSecret }} + {{- .Values.hub.cookieSecret }} + {{- else }} + {{- $k8s_state := lookup "v1" "Secret" .Release.Namespace (include "jupyterhub.hub.fullname" .) | default (dict "data" (dict)) }} + {{- if hasKey $k8s_state.data "hub.config.JupyterHub.cookie_secret" }} + {{- index $k8s_state.data "hub.config.JupyterHub.cookie_secret" | b64dec }} + {{- else }} + {{- include "jupyterhub.randHex" 64 }} + {{- end }} + {{- end }} +{{- end }} + +{{- define "jupyterhub.hub.config.CryptKeeper.keys" -}} + {{- if (.Values.hub.config | dig "CryptKeeper" "keys" "") }} + {{- .Values.hub.config.CryptKeeper.keys | join ";" }} + {{- else }} + {{- $k8s_state := lookup "v1" "Secret" .Release.Namespace (include "jupyterhub.hub.fullname" .) 
| default (dict "data" (dict)) }} + {{- if hasKey $k8s_state.data "hub.config.CryptKeeper.keys" }} + {{- index $k8s_state.data "hub.config.CryptKeeper.keys" | b64dec }} + {{- else }} + {{- include "jupyterhub.randHex" 64 }} + {{- end }} + {{- end }} +{{- end }} + +{{- define "jupyterhub.hub.services.get_api_token" -}} + {{- $_ := index . 0 }} + {{- $service_key := index . 1 }} + {{- $explicitly_set_api_token := or ($_.Values.hub.services | dig $service_key "api_token" "") ($_.Values.hub.services | dig $service_key "apiToken" "") }} + {{- if $explicitly_set_api_token }} + {{- $explicitly_set_api_token }} + {{- else }} + {{- $k8s_state := lookup "v1" "Secret" $_.Release.Namespace (include "jupyterhub.hub.fullname" $_) | default (dict "data" (dict)) }} + {{- $k8s_secret_key := print "hub.services." $service_key ".apiToken" }} + {{- if hasKey $k8s_state.data $k8s_secret_key }} + {{- index $k8s_state.data $k8s_secret_key | b64dec }} + {{- else }} + {{- include "jupyterhub.randHex" 64 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/configmap.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/configmap.yaml new file mode 100644 index 000000000..801279d80 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/configmap.yaml @@ -0,0 +1,31 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ include "jupyterhub.hub.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +data: + {{- /* + Resource names exposed to reliably reference them. + + user-scheduler: "my-helm-release-user-scheduler" + ... + */}} + {{- include "jupyterhub.name-templates" . | nindent 2 }} + + {{- /* + Glob files to allow them to be mounted by the hub pod + + jupyterhub_config: | + multi line string content... + z2jh.py: | + multi line string content... 
+ */}} + {{- (.Files.Glob "files/hub/*").AsConfig | nindent 2 }} + + {{- /* + Store away a checksum of the hook-image-puller daemonset so future upgrades + can compare and decide if it should run or not using the `lookup` function. + */}} + checksum_hook-image-puller: {{ include "jupyterhub.imagePuller.daemonset.hook.checksum" . | quote }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/deployment.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/deployment.yaml new file mode 100644 index 000000000..b1add47b0 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/deployment.yaml @@ -0,0 +1,244 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "jupyterhub.hub.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +spec: + {{- if not (typeIs "" .Values.hub.revisionHistoryLimit) }} + revisionHistoryLimit: {{ .Values.hub.revisionHistoryLimit }} + {{- end }} + replicas: 1 + selector: + matchLabels: + {{- include "jupyterhub.matchLabels" . | nindent 6 }} + strategy: + {{- .Values.hub.deploymentStrategy | toYaml | nindent 4 }} + template: + metadata: + labels: + {{- /* Changes here will cause the Deployment to restart the pods. */}} + {{- include "jupyterhub.matchLabels" . | nindent 8 }} + hub.jupyter.org/network-access-proxy-api: "true" + hub.jupyter.org/network-access-proxy-http: "true" + hub.jupyter.org/network-access-singleuser: "true" + {{- with .Values.hub.labels }} + {{- . | toYaml | nindent 8 }} + {{- end }} + annotations: + {{- /* This lets us autorestart when the secret changes! */}} + checksum/config-map: {{ include (print .Template.BasePath "/hub/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print .Template.BasePath "/hub/secret.yaml") . | sha256sum }} + {{- with .Values.hub.annotations }} + {{- . 
| toYaml | nindent 8 }} + {{- end }} + spec: + {{- if .Values.scheduling.podPriority.enabled }} + priorityClassName: {{ include "jupyterhub.priority.fullname" . }} + {{- end }} + {{- with .Values.hub.nodeSelector }} + nodeSelector: + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- with concat .Values.scheduling.corePods.tolerations .Values.hub.tolerations }} + tolerations: + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- include "jupyterhub.coreAffinity" . | nindent 6 }} + volumes: + - name: config + configMap: + name: {{ include "jupyterhub.hub.fullname" . }} + - name: secret + secret: + secretName: {{ include "jupyterhub.hub.fullname" . }} + {{- with (include "jupyterhub.hub-existing-secret.fullname" .) }} + - name: existing-secret + secret: + secretName: {{ . }} + {{- end }} + {{- if .Values.hub.extraFiles }} + - name: files + secret: + secretName: {{ include "jupyterhub.hub.fullname" . }} + items: + {{- range $file_key, $file_details := .Values.hub.extraFiles }} + - key: {{ $file_key | quote }} + path: {{ $file_key | quote }} + {{- with $file_details.mode }} + mode: {{ . }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.hub.extraVolumes }} + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- if eq .Values.hub.db.type "sqlite-pvc" }} + - name: pvc + persistentVolumeClaim: + claimName: {{ include "jupyterhub.hub-pvc.fullname" . }} + {{- end }} + {{- with include "jupyterhub.hub-serviceaccount.fullname" . }} + serviceAccountName: {{ . }} + {{- end }} + {{- with .Values.hub.podSecurityContext }} + securityContext: + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.hub.image) }} + imagePullSecrets: {{ . }} + {{- end }} + {{- with .Values.hub.initContainers }} + initContainers: + {{- . | toYaml | nindent 8 }} + {{- end }} + containers: + {{- with .Values.hub.extraContainers }} + {{- . 
| toYaml | nindent 8 }} + {{- end }} + - name: hub + image: {{ .Values.hub.image.name }}:{{ .Values.hub.image.tag }} + {{- with .Values.hub.command }} + command: + {{- range . }} + - {{ tpl . $ }} + {{- end }} + {{- end }} + args: + {{- /* .Values.hub.args overrides everything the Helm chart otherwise would set */}} + {{- if .Values.hub.args }} + {{- range .Values.hub.args }} + - {{ tpl . $ }} + {{- end }} + + {{- /* .Values.hub.args didn't replace the default logic */}} + {{- else }} + - jupyterhub + - --config + - /usr/local/etc/jupyterhub/jupyterhub_config.py + {{- if .Values.debug.enabled }} + - --debug + {{- end }} + {{- /* NOTE: + We want to do automatic upgrades for sqlite-pvc by default, but + allow users to opt out of that if they want. Users using their own + db need to 'opt in'. Go Templates treat nil and "" and false as + 'false', making this code complex. We can probably make this a + one-liner, but doing combinations of boolean vars in go templates + is very inelegant & hard to reason about. + */}} + {{- $upgradeType := typeOf .Values.hub.db.upgrade }} + {{- if eq $upgradeType "bool" }} + {{- /* .Values.hub.db.upgrade has been explicitly set to true or false */}} + {{- if .Values.hub.db.upgrade }} + - --upgrade-db + {{- end }} + {{- else if eq $upgradeType "" }} + {{- /* .Values.hub.db.upgrade is nil */}} + {{- if eq .Values.hub.db.type "sqlite-pvc" }} + - --upgrade-db + {{- end }} + {{- end }} + {{- end }} + volumeMounts: + - mountPath: /usr/local/etc/jupyterhub/jupyterhub_config.py + subPath: jupyterhub_config.py + name: config + - mountPath: /usr/local/etc/jupyterhub/z2jh.py + subPath: z2jh.py + name: config + - mountPath: /usr/local/etc/jupyterhub/config/ + name: config + - mountPath: /usr/local/etc/jupyterhub/secret/ + name: secret + {{- if (include "jupyterhub.hub-existing-secret.fullname" .) 
}} + - mountPath: /usr/local/etc/jupyterhub/existing-secret/ + name: existing-secret + {{- end }} + {{- range $file_key, $file_details := .Values.hub.extraFiles }} + - mountPath: {{ $file_details.mountPath }} + subPath: {{ $file_key | quote }} + name: files + {{- end }} + {{- with .Values.hub.extraVolumeMounts }} + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- if eq .Values.hub.db.type "sqlite-pvc" }} + - mountPath: /srv/jupyterhub + name: pvc + {{- with .Values.hub.db.pvc.subPath }} + subPath: {{ . | quote }} + {{- end }} + {{- end }} + {{- with .Values.hub.resources }} + resources: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.hub.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + {{- with .Values.hub.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.hub.lifecycle }} + lifecycle: + {{- . | toYaml | nindent 12 }} + {{- end }} + env: + - name: PYTHONUNBUFFERED + value: "1" + - name: HELM_RELEASE_NAME + value: {{ .Release.Name | quote }} + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIGPROXY_AUTH_TOKEN + valueFrom: + secretKeyRef: + {{- /* NOTE: + References the chart managed k8s Secret even if + hub.existingSecret is specified to avoid using the lookup + function on the user managed k8s Secret which is assumed to + not be possible. + */}} + name: {{ include "jupyterhub.hub.fullname" . }} + key: hub.config.ConfigurableHTTPProxy.auth_token + {{- with .Values.hub.extraEnv }} + {{- include "jupyterhub.extraEnv" . | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: 8081 + {{- if .Values.hub.livenessProbe.enabled }} + {{- /* NOTE: + We don't know how long hub database upgrades could take so having a + liveness probe could be a bit risky unless we put a + initialDelaySeconds value with long enough margin for that to not be + an issue. 
If it is too short, we could end up aborting database + upgrades midway or ending up in an infinite restart loop. + */}} + livenessProbe: + initialDelaySeconds: {{ .Values.hub.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.hub.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.hub.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.hub.livenessProbe.failureThreshold }} + httpGet: + path: {{ .Values.hub.baseUrl | trimSuffix "/" }}/hub/health + port: http + {{- end }} + {{- if .Values.hub.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.hub.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.hub.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.hub.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.hub.readinessProbe.failureThreshold }} + httpGet: + path: {{ .Values.hub.baseUrl | trimSuffix "/" }}/hub/health + port: http + {{- end }} + {{- with .Values.hub.extraPodSpec }} + {{- . | toYaml | nindent 6 }} + {{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/netpol.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/netpol.yaml new file mode 100644 index 000000000..c92e1cc1f --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/netpol.yaml @@ -0,0 +1,85 @@ +{{- if .Values.hub.networkPolicy.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "jupyterhub.hub.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "jupyterhub.matchLabels" . | nindent 6 }} + policyTypes: + - Ingress + - Egress + + # IMPORTANT: + # NetworkPolicy's ingress "from" and egress "to" rule specifications require + # great attention to detail. A quick summary is: + # + # 1. 
You can provide "from"/"to" rules that provide access to either all ports or a + # subset of ports. + # 2. You can for each "from"/"to" rule provide any number of + # "sources"/"destinations" of four different kinds. + # - podSelector - targets pods with a certain label in the same namespace as the NetworkPolicy + # - namespaceSelector - targets all pods running in namespaces with a certain label + # - namespaceSelector and podSelector - targets pods with a certain label running in namespaces with a certain label + # - ipBlock - targets network traffic from/to a set of IP address ranges + # + # Read more at: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors + # + ingress: + {{- with .Values.hub.networkPolicy.allowedIngressPorts }} + # allow incoming traffic to these ports independent of source + - ports: + {{- range $port := . }} + - port: {{ $port }} + {{- end }} + {{- end }} + + # allowed pods (hub.jupyter.org/network-access-hub) --> hub + - ports: + - port: http + from: + # source 1 - labeled pods + - podSelector: + matchLabels: + hub.jupyter.org/network-access-hub: "true" + {{- if eq .Values.hub.networkPolicy.interNamespaceAccessLabels "accept" }} + namespaceSelector: + matchLabels: {} # without this, the podSelector would only consider pods in the local namespace + # source 2 - pods in labeled namespaces + - namespaceSelector: + matchLabels: + hub.jupyter.org/network-access-hub: "true" + {{- end }} + + {{- with .Values.hub.networkPolicy.ingress }} + # depends, but default is nothing --> hub + {{- . | toYaml | nindent 4 }} + {{- end }} + + egress: + # hub --> proxy + - to: + - podSelector: + matchLabels: + {{- $_ := merge (dict "componentLabel" "proxy") . }} + {{- include "jupyterhub.matchLabels" $_ | nindent 14 }} + ports: + - port: 8001 + + # hub --> singleuser-server + - to: + - podSelector: + matchLabels: + {{- $_ := merge (dict "componentLabel" "singleuser-server") . 
}} + {{- include "jupyterhub.matchLabels" $_ | nindent 14 }} + ports: + - port: 8888 + + {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.hub.networkPolicy)) }} + {{- . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/pdb.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/pdb.yaml new file mode 100644 index 000000000..eb8a4ac8c --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/pdb.yaml @@ -0,0 +1,19 @@ +{{- if .Values.hub.pdb.enabled -}} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "jupyterhub.hub.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +spec: + {{- if not (typeIs "" .Values.hub.pdb.maxUnavailable) }} + maxUnavailable: {{ .Values.hub.pdb.maxUnavailable }} + {{- end }} + {{- if not (typeIs "" .Values.hub.pdb.minAvailable) }} + minAvailable: {{ .Values.hub.pdb.minAvailable }} + {{- end }} + selector: + matchLabels: + {{- include "jupyterhub.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/pvc.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/pvc.yaml new file mode 100644 index 000000000..e9b22e858 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/pvc.yaml @@ -0,0 +1,26 @@ +{{- if eq .Values.hub.db.type "sqlite-pvc" -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "jupyterhub.hub-pvc.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} + {{- with .Values.hub.db.pvc.annotations }} + annotations: + {{- . | toYaml | nindent 4 }} + {{- end }} +spec: + {{- with .Values.hub.db.pvc.selector }} + selector: + {{- . 
| toYaml | nindent 4 }} + {{- end }} + {{- if typeIs "string" .Values.hub.db.pvc.storageClassName }} + storageClassName: {{ .Values.hub.db.pvc.storageClassName | quote }} + {{- end }} + accessModes: + {{- .Values.hub.db.pvc.accessModes | toYaml | nindent 4 }} + resources: + requests: + storage: {{ .Values.hub.db.pvc.storage | quote }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/rbac.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/rbac.yaml new file mode 100644 index 000000000..fc1770297 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/rbac.yaml @@ -0,0 +1,32 @@ +{{- if .Values.rbac.create -}} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "jupyterhub.hub.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +rules: + - apiGroups: [""] # "" indicates the core API group + resources: ["pods", "persistentvolumeclaims", "secrets", "services"] + verbs: ["get", "watch", "list", "create", "delete"] + - apiGroups: [""] # "" indicates the core API group + resources: ["events"] + verbs: ["get", "watch", "list"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "jupyterhub.hub.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }} + namespace: {{ .Values.namespace }} +roleRef: + kind: Role + name: {{ include "jupyterhub.hub.fullname" . 
}} + apiGroup: rbac.authorization.k8s.io +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/secret.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/secret.yaml new file mode 100644 index 000000000..6caf01cdb --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/secret.yaml @@ -0,0 +1,51 @@ +kind: Secret +apiVersion: v1 +metadata: + name: {{ include "jupyterhub.hub.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +type: Opaque +data: + {{- $values := merge dict .Values }} + {{- /* also passthrough subset of Chart / Release */}} + {{- $_ := set $values "Chart" (dict "Name" .Chart.Name "Version" .Chart.Version) }} + {{- $_ := set $values "Release" (pick .Release "Name" "Namespace" "Service") }} + values.yaml: {{ $values | toYaml | b64enc | quote }} + + {{- with .Values.hub.db.password }} + # Used to mount MYSQL_PWD or PGPASSWORD on hub pod, unless hub.existingSecret + # is set as then that k8s Secret's value must be specified instead. + hub.db.password: {{ . | b64enc | quote }} + {{- end }} + + # Any JupyterHub Services api_tokens are exposed in this k8s Secret as a + # convenience for external services running in the k8s cluster that could + # mount them directly from this k8s Secret. + {{- range $key, $service := .Values.hub.services }} + hub.services.{{ $key }}.apiToken: {{ include "jupyterhub.hub.services.get_api_token" (list $ $key) | b64enc | quote }} + {{- end }} + + # During Helm template rendering, these values that can be autogenerated for + # users are set using the following logic: + # + # 1. Use chart configuration's value + # 2. Use k8s Secret's value + # 3. 
Use a new autogenerated value + # + # hub.config.ConfigurableHTTPProxy.auth_token: for hub to proxy-api authorization (JupyterHub.proxy_auth_token is deprecated) + # hub.config.JupyterHub.cookie_secret: for cookie encryption + # hub.config.CryptKeeper.keys: for auth state encryption + # + hub.config.ConfigurableHTTPProxy.auth_token: {{ include "jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token" . | required "This should not happen: blank output from 'jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token' template" | b64enc | quote }} + hub.config.JupyterHub.cookie_secret: {{ include "jupyterhub.hub.config.JupyterHub.cookie_secret" . | required "This should not happen: blank output from 'jupyterhub.hub.config.JupyterHub.cookie_secret' template" | b64enc | quote }} + hub.config.CryptKeeper.keys: {{ include "jupyterhub.hub.config.CryptKeeper.keys" . | required "This should not happen: blank output from 'jupyterhub.hub.config.CryptKeeper.keys' template" | b64enc | quote }} + + {{- with include "jupyterhub.extraFiles.data" .Values.hub.extraFiles }} + {{- . | nindent 2 }} + {{- end }} + +{{- with include "jupyterhub.extraFiles.stringData" .Values.hub.extraFiles }} +stringData: + {{- . | nindent 2 }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/service.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/service.yaml new file mode 100644 index 000000000..497f27cf6 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/service.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "jupyterhub.hub.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . 
| nindent 4 }} + annotations: + {{- if not (index .Values.hub.service.annotations "prometheus.io/scrape") }} + prometheus.io/scrape: "true" + {{- end }} + {{- if not (index .Values.hub.service.annotations "prometheus.io/path") }} + prometheus.io/path: {{ .Values.hub.baseUrl | trimSuffix "/" }}/hub/metrics + {{- end }} + {{- if not (index .Values.hub.service.annotations "prometheus.io/port") }} + prometheus.io/port: "8081" + {{- end }} + {{- with .Values.hub.service.annotations }} + {{- . | toYaml | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.hub.service.type }} + {{- with .Values.hub.service.loadBalancerIP }} + loadBalancerIP: {{ . }} + {{- end }} + selector: + {{- include "jupyterhub.matchLabels" . | nindent 4 }} + ports: + - name: hub + port: 8081 + targetPort: http + {{- with .Values.hub.service.ports.nodePort }} + nodePort: {{ . }} + {{- end }} + + {{- with .Values.hub.service.extraPorts }} + {{- . | toYaml | nindent 4 }} + {{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/serviceaccount.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/serviceaccount.yaml new file mode 100644 index 000000000..600b41b1c --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/hub/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.hub.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }} + namespace: {{ .Values.namespace }} + {{- with .Values.hub.serviceAccount.annotations }} + annotations: + {{- . | toYaml | nindent 4 }} + {{- end }} + labels: + {{- include "jupyterhub.labels" . 
| nindent 4 }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-pull-secret.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-pull-secret.yaml new file mode 100644 index 000000000..e7793cf0a --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-pull-secret.yaml @@ -0,0 +1,16 @@ +{{- if .Values.imagePullSecret.create }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ include "jupyterhub.image-pull-secret.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation + "helm.sh/hook-weight": "-20" +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ include "jupyterhub.dockerconfigjson" . }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/_helpers-daemonset.tpl b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/_helpers-daemonset.tpl new file mode 100644 index 000000000..fd0af77f0 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/_helpers-daemonset.tpl @@ -0,0 +1,280 @@ +{{- /* +Returns an image-puller daemonset. Two daemonsets will be created like this. +- hook-image-puller: for pre helm upgrade image pulling (lives temporarily) +- continuous-image-puller: for newly added nodes image pulling +*/}} +{{- define "jupyterhub.imagePuller.daemonset" -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + {{- if .hook }} + name: {{ include "jupyterhub.hook-image-puller.fullname" . }} + {{- else }} + name: {{ include "jupyterhub.continuous-image-puller.fullname" . }} + {{- end }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . 
| nindent 4 }} + {{- if .hook }} + hub.jupyter.org/deletable: "true" + {{- end }} + {{- if .hook }} + annotations: + {{- /* + Allows the daemonset to be deleted when the image-awaiter job is completed. + */}} + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + "helm.sh/hook-weight": "-10" + {{- end }} +spec: + selector: + matchLabels: + {{- include "jupyterhub.matchLabels" . | nindent 6 }} + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 100% + {{- if not (typeIs "" .Values.prePuller.revisionHistoryLimit) }} + revisionHistoryLimit: {{ .Values.prePuller.revisionHistoryLimit }} + {{- end }} + template: + metadata: + labels: + {{- include "jupyterhub.matchLabels" . | nindent 8 }} + {{- with .Values.prePuller.annotations }} + annotations: + {{- . | toYaml | nindent 8 }} + {{- end }} + spec: + {{- /* + image-puller pods are made evictable to save on the k8s pods + per node limit all k8s clusters have and have a higher priority + than user-placeholder pods that could block an entire node. + */}} + {{- if .Values.scheduling.podPriority.enabled }} + priorityClassName: {{ include "jupyterhub.image-puller-priority.fullname" . }} + {{- end }} + {{- with .Values.singleuser.nodeSelector }} + nodeSelector: + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- with concat .Values.scheduling.userPods.tolerations .Values.singleuser.extraTolerations .Values.prePuller.extraTolerations }} + tolerations: + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- if include "jupyterhub.userNodeAffinityRequired" . }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + {{- include "jupyterhub.userNodeAffinityRequired" . | nindent 14 }} + {{- end }} + terminationGracePeriodSeconds: 0 + automountServiceAccountToken: false + {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.singleuser.image) }} + imagePullSecrets: {{ . 
}} + {{- end }} + initContainers: + {{- /* --- Conditionally pull an image all user pods will use in an initContainer --- */}} + {{- $blockWithIptables := hasKey .Values.singleuser.cloudMetadata "enabled" | ternary (not .Values.singleuser.cloudMetadata.enabled) .Values.singleuser.cloudMetadata.blockWithIptables }} + {{- if $blockWithIptables }} + - name: image-pull-metadata-block + image: {{ .Values.singleuser.networkTools.image.name }}:{{ .Values.singleuser.networkTools.image.tag }} + {{- with .Values.singleuser.networkTools.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + command: + - /bin/sh + - -c + - echo "Pulling complete" + {{- with .Values.prePuller.resources }} + resources: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.prePuller.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + + {{- /* --- Pull default image --- */}} + - name: image-pull-singleuser + image: {{ .Values.singleuser.image.name }}:{{ .Values.singleuser.image.tag }} + command: + - /bin/sh + - -c + - echo "Pulling complete" + {{- with .Values.prePuller.resources }} + resources: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.prePuller.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} + {{- end }} + + {{- /* --- Pull extra containers' images --- */}} + {{- range $k, $container := concat .Values.singleuser.initContainers .Values.singleuser.extraContainers }} + - name: image-pull-singleuser-init-and-extra-containers-{{ $k }} + image: {{ $container.image }} + command: + - /bin/sh + - -c + - echo "Pulling complete" + {{- with $.Values.prePuller.resources }} + resources: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with $.Values.prePuller.containerSecurityContext }} + securityContext: + {{- . 
| toYaml | nindent 12 }} + {{- end }} + {{- end }} + + {{- /* --- Conditionally pull profileList images --- */}} + {{- if .Values.prePuller.pullProfileListImages }} + {{- range $k, $container := .Values.singleuser.profileList }} + {{- /* profile's kubespawner_override */}} + {{- if $container.kubespawner_override }} + {{- if $container.kubespawner_override.image }} + - name: image-pull-singleuser-profilelist-{{ $k }} + image: {{ $container.kubespawner_override.image }} + command: + - /bin/sh + - -c + - echo "Pulling complete" + {{- with $.Values.prePuller.resources }} + resources: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with $.Values.prePuller.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + {{- end }} + {{- /* kubespawner_override in profile's profile_options */}} + {{- if $container.profile_options }} + {{- range $option, $option_spec := $container.profile_options }} + {{- if $option_spec.choices }} + {{- range $choice, $choice_spec := $option_spec.choices }} + {{- if $choice_spec.kubespawner_override }} + {{- if $choice_spec.kubespawner_override.image }} + - name: image-pull-profile-{{ $k }}-option-{{ $option }}-{{ $choice }} + image: {{ $choice_spec.kubespawner_override.image }} + command: + - /bin/sh + - -c + - echo "Pulling complete" + {{- with $.Values.prePuller.resources }} + resources: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with $.Values.prePuller.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- /* --- Pull extra images --- */}} + {{- range $k, $v := .Values.prePuller.extraImages }} + - name: image-pull-{{ $k }} + image: {{ $v.name }}:{{ $v.tag }} + command: + - /bin/sh + - -c + - echo "Pulling complete" + {{- with $.Values.prePuller.resources }} + resources: + {{- . 
| toYaml | nindent 12 }} + {{- end }} + {{- with $.Values.prePuller.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- end }} + containers: + - name: pause + image: {{ .Values.prePuller.pause.image.name }}:{{ .Values.prePuller.pause.image.tag }} + {{- with .Values.prePuller.resources }} + resources: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.prePuller.pause.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} + {{- end }} +{{- end }} + + +{{- /* + Returns a rendered k8s DaemonSet resource: continuous-image-puller +*/}} +{{- define "jupyterhub.imagePuller.daemonset.continuous" -}} + {{- $_ := merge (dict "hook" false "componentPrefix" "continuous-") . }} + {{- include "jupyterhub.imagePuller.daemonset" $_ }} +{{- end }} + + +{{- /* + Returns a rendered k8s DaemonSet resource: hook-image-puller +*/}} +{{- define "jupyterhub.imagePuller.daemonset.hook" -}} + {{- $_ := merge (dict "hook" true "componentPrefix" "hook-") . }} + {{- include "jupyterhub.imagePuller.daemonset" $_ }} +{{- end }} + + +{{- /* + Returns a checksum of the rendered k8s DaemonSet resource: hook-image-puller + + This checksum is used when prePuller.hook.pullOnlyOnChanges=true to decide if + it is worth creating the hook-image-puller associated resources. +*/}} +{{- define "jupyterhub.imagePuller.daemonset.hook.checksum" -}} + {{- /* + We pin componentLabel and Chart.Version as doing so can pin labels + of no importance if they would change. Chart.Name is also pinned as + a harmless technical workaround when we compute the checksum. + */}} + {{- $_ := merge (dict "componentLabel" "pinned" "Chart" (dict "Name" "jupyterhub" "Version" "pinned")) . -}} + {{- $yaml := include "jupyterhub.imagePuller.daemonset.hook" $_ }} + {{- $yaml | sha256sum }} +{{- end }} + + +{{- /* + Returns a truthy string or a blank string depending on if the + hook-image-puller should be installed. 
The truthy strings are comments + that summarize the state that led to returning a truthy string. + + - prePuller.hook.enabled must be true + - if prePuller.hook.pullOnlyOnChanges is true, the checksum of the + hook-image-puller daemonset must differ since last upgrade +*/}} +{{- define "jupyterhub.imagePuller.daemonset.hook.install" -}} + {{- if .Values.prePuller.hook.enabled }} + {{- if .Values.prePuller.hook.pullOnlyOnChanges }} + {{- $new_checksum := include "jupyterhub.imagePuller.daemonset.hook.checksum" . }} + {{- $k8s_state := lookup "v1" "ConfigMap" .Release.Namespace (include "jupyterhub.hub.fullname" .) | default (dict "data" (dict)) }} + {{- $old_checksum := index $k8s_state.data "checksum_hook-image-puller" | default "" }} + {{- if ne $new_checksum $old_checksum -}} +# prePuller.hook.enabled={{ .Values.prePuller.hook.enabled }} +# prePuller.hook.pullOnlyOnChanges={{ .Values.prePuller.hook.pullOnlyOnChanges }} +# post-upgrade checksum != pre-upgrade checksum (of the hook-image-puller DaemonSet) +# "{{ $new_checksum }}" != "{{ $old_checksum}}" + {{- end }} + {{- else -}} +# prePuller.hook.enabled={{ .Values.prePuller.hook.enabled }} +# prePuller.hook.pullOnlyOnChanges={{ .Values.prePuller.hook.pullOnlyOnChanges }} + {{- end }} + {{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/daemonset-continuous.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/daemonset-continuous.yaml new file mode 100644 index 000000000..85a572fd7 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/daemonset-continuous.yaml @@ -0,0 +1,8 @@ +{{- /* +The continuous-image-puller daemonset task is to pull required images to nodes +that are added in between helm upgrades, for example by manually adding a node +or by the cluster autoscaler. 
+*/}} +{{- if .Values.prePuller.continuous.enabled }} +{{- include "jupyterhub.imagePuller.daemonset.continuous" . }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/daemonset-hook.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/daemonset-hook.yaml new file mode 100644 index 000000000..7e9c2d0f8 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/daemonset-hook.yaml @@ -0,0 +1,9 @@ +{{- /* +The hook-image-puller daemonset will be created with the highest priority during +helm upgrades. Its task is to pull the required images on all nodes. When the +image-awaiter job confirms the required images to be pulled, the daemonset is +deleted. Only then will the actual helm upgrade start. +*/}} +{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}} +{{- include "jupyterhub.imagePuller.daemonset.hook" . }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/job.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/job.yaml new file mode 100644 index 000000000..9119551c4 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/job.yaml @@ -0,0 +1,77 @@ +{{- /* +This job has a part to play in a helm upgrade process. It simply waits for the +hook-image-puller daemonset which is started slightly before this job to get +its pods running. If all those pods are running they must have pulled all the +required images on all nodes as they are used as init containers with a dummy +command. +*/}} +{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . 
| nindent 4 }} + hub.jupyter.org/deletable: "true" + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + "helm.sh/hook-weight": "10" +spec: + template: + # The hook-image-awaiter Job and hook-image-puller DaemonSet was + # conditionally created based on this state: + # + {{- include "jupyterhub.imagePuller.daemonset.hook.install" . | nindent 4 }} + # + metadata: + labels: + {{- /* Changes here will cause the Job to restart the pods. */}} + {{- include "jupyterhub.matchLabels" . | nindent 8 }} + {{- with .Values.prePuller.labels }} + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- with .Values.prePuller.annotations }} + annotations: + {{- . | toYaml | nindent 8 }} + {{- end }} + spec: + restartPolicy: Never + {{- with include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }} + serviceAccountName: {{ . }} + {{- end }} + {{- with .Values.prePuller.hook.nodeSelector }} + nodeSelector: + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- with concat .Values.scheduling.corePods.tolerations .Values.prePuller.hook.tolerations }} + tolerations: + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.prePuller.hook.image) }} + imagePullSecrets: {{ . }} + {{- end }} + containers: + - image: {{ .Values.prePuller.hook.image.name }}:{{ .Values.prePuller.hook.image.tag }} + name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }} + {{- with .Values.prePuller.hook.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + command: + - /image-awaiter + - -ca-path=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - -auth-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + - -api-server-address=https://kubernetes.default.svc:$(KUBERNETES_SERVICE_PORT) + - -namespace={{ .Release.Namespace }} + - -daemonset={{ include "jupyterhub.hook-image-puller.fullname" . 
}} + - -pod-scheduling-wait-duration={{ .Values.prePuller.hook.podSchedulingWaitDuration }} + {{- with .Values.prePuller.hook.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.prePuller.hook.resources }} + resources: + {{- . | toYaml | nindent 12 }} + {{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/priorityclass.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/priorityclass.yaml new file mode 100644 index 000000000..b2256199a --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/priorityclass.yaml @@ -0,0 +1,19 @@ +{{- if .Values.scheduling.podPriority.enabled }} +{{- if or .Values.prePuller.hook.enabled .Values.prePuller.continuous.enabled -}} +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ include "jupyterhub.image-puller-priority.fullname" . }} + namespace: {{ .Values.namespace }} + annotations: + meta.helm.sh/release-name: "{{ .Chart.Name }}" + meta.helm.sh/release-namespace: "{{ .Chart.Namespace }}" + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +value: {{ .Values.scheduling.podPriority.imagePullerPriority }} +globalDefault: false +description: >- + Enables [hook|continuous]-image-puller pods to fit on nodes even though they + are clogged by user-placeholder pods, while not evicting normal user pods. 
+{{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/rbac.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/rbac.yaml new file mode 100644 index 000000000..b06a11cb0 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/rbac.yaml @@ -0,0 +1,47 @@ +{{- /* +Permissions to be used by the hook-image-awaiter job +*/}} +{{- if .Values.rbac.create -}} +{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} + hub.jupyter.org/deletable: "true" + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + "helm.sh/hook-weight": "0" +rules: + - apiGroups: ["apps"] # "" indicates the core API group + resources: ["daemonsets"] + verbs: ["get"] +--- +{{- /* +... as declared by this binding. +*/}} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} + hub.jupyter.org/deletable: "true" + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + "helm.sh/hook-weight": "0" +subjects: + - kind: ServiceAccount + name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }} + namespace: {{ .Values.namespace }} +roleRef: + kind: Role + name: {{ include "jupyterhub.hook-image-awaiter.fullname" . 
}} + apiGroup: rbac.authorization.k8s.io +{{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/serviceaccount.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/serviceaccount.yaml new file mode 100644 index 000000000..e510316c6 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/image-puller/serviceaccount.yaml @@ -0,0 +1,22 @@ +{{- /* +ServiceAccount for the pre-puller hook's image-awaiter-job +*/}} +{{- if .Values.prePuller.hook.serviceAccount.create -}} +{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} + hub.jupyter.org/deletable: "true" + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + "helm.sh/hook-weight": "0" + {{- with .Values.prePuller.hook.serviceAccount.annotations }} + {{- . | toYaml | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/ingress.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/ingress.yaml new file mode 100644 index 000000000..d5ec28851 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/ingress.yaml @@ -0,0 +1,43 @@ +{{- if .Values.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "jupyterhub.ingress.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- . | toYaml | nindent 4 }} + {{- end }} +spec: + {{- with .Values.ingress.ingressClassName }} + ingressClassName: "{{ . 
}}" + {{- end }} + rules: + {{- range $host := .Values.ingress.hosts | default (list "") }} + - http: + paths: + - path: {{ $.Values.hub.baseUrl | trimSuffix "/" }}/{{ $.Values.ingress.pathSuffix }} + pathType: {{ $.Values.ingress.pathType }} + backend: + service: + name: {{ include "jupyterhub.proxy-public.fullname" $ }} + port: + name: http + - path: {{ $.Values.hub.baseUrl | trimSuffix "/" }}/{{ $.Values.ingress.userPathSuffix }} + pathType: {{ $.Values.ingress.pathType }} + backend: + service: + name: {{ include "jupyterhub.proxy-public.fullname" $ }} + port: + name: http + {{- if $host }} + host: {{ $host | quote }} + {{- end }} + {{- end }} + {{- with .Values.ingress.tls }} + tls: + {{- . | toYaml | nindent 4 }} + {{- end }} + {{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/_configmap-dynamic.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/_configmap-dynamic.yaml new file mode 100644 index 000000000..0e2a8f412 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/_configmap-dynamic.yaml @@ -0,0 +1,109 @@ +{{- define "jupyterhub.dynamic.yaml" -}} +# Content of dynamic.yaml to be merged merged with +# proxy.traefik.extraDynamicConfig. +# ---------------------------------------------------------------------------- +http: + # Middlewares tweaks requests. We define them here and reference them in + # our routers. We use them to redirect http traffic and headers to proxied + # web requests. + # + # ref: https://docs.traefik.io/middlewares/overview/ + middlewares: + hsts: + # A middleware to add a HTTP Strict-Transport-Security (HSTS) response + # header, they function as a request for browsers to enforce HTTPS on + # their end in for a given time into the future, and optionally + # subdomains for requests to subdomains as well. 
+ # + # ref: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security + headers: + stsIncludeSubdomains: {{ .Values.proxy.traefik.hsts.includeSubdomains }} + stsPreload: {{ .Values.proxy.traefik.hsts.preload }} + stsSeconds: {{ .Values.proxy.traefik.hsts.maxAge }} + # A middleware to redirect to https + redirect: + redirectScheme: + permanent: true + scheme: https + # A middleware to add a X-Scheme (X-Forwarded-Proto) header that + # JupyterHub's Tornado web-server needs if expecting to serve https + # traffic. Without it we would run into issues like: + # https://github.com/jupyterhub/jupyterhub/issues/2284 + scheme: + headers: + customRequestHeaders: + # DISCUSS ME: Can we use the X-Forwarded-Proto header instead? It + # seems more recognized. Mozilla calls it the de-facto standard + # header for this purpose, and Tornado recognizes both. + # + # ref: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-Proto + # ref: https://www.tornadoweb.org/en/stable/httpserver.html#http-server + X-Scheme: https + + # Routers routes web requests to a service and optionally tweaks them with + # middleware. + # + # ref: https://docs.traefik.io/routing/routers/ + routers: + # Route secure https traffic to the configurable-http-proxy managed by + # JupyterHub. + default: + entrypoints: + - "https" + middlewares: + - "hsts" + - "scheme" + rule: PathPrefix(`/`) + service: default + # Use our predefined TLS options and certificate resolver, enabling + # this route to act as a TLS termination proxy with high security + # standards. + tls: + certResolver: default + domains: + {{- range $host := .Values.proxy.https.hosts }} + - main: {{ $host }} + {{- end }} + options: default + + # Route insecure http traffic to https + insecure: + entrypoints: + - "http" + middlewares: + - "redirect" + rule: PathPrefix(`/`) + service: default + + # Services represents the destinations we route traffic to. 
+ # + # ref: https://docs.traefik.io/routing/services/ + services: + # Represents the configurable-http-proxy (chp) server that is managed by + # JupyterHub to route traffic both to itself and to user pods. + default: + loadBalancer: + servers: + - url: 'http://proxy-http:8000/' + +# Configure TLS to give us an A+ in the ssllabs.com test +# +# ref: https://www.ssllabs.com/ssltest/ +tls: + options: + default: + # Allowed ciphers adapted from Mozillas SSL Configuration Generator + # configured for Intermediate support which doesn't support very old + # systems but doesn't require very modern either. + # + # ref: https://ssl-config.mozilla.org/#server=traefik&version=2.1.2&config=intermediate&guideline=5.4 + cipherSuites: + - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 + - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 + - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 + - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 + minVersion: VersionTLS12 + sniStrict: true +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/_configmap-traefik.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/_configmap-traefik.yaml new file mode 100644 index 000000000..7287a7065 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/_configmap-traefik.yaml @@ -0,0 +1,68 @@ +{{- define "jupyterhub.traefik.yaml" -}} +# Content of traefik.yaml to be merged merged with +# proxy.traefik.extraStaticConfig. 
+# ---------------------------------------------------------------------------- + +# Config of logs about web requests +# +# ref: https://docs.traefik.io/observability/access-logs/ +accessLog: + # Redact commonly sensitive headers + fields: + headers: + names: + Authorization: redacted + Cookie: redacted + Set-Cookie: redacted + X-Xsrftoken: redacted + # Only log errors + filters: + statusCodes: + - 500-599 + +# Automatically acquire certificates certificates form a Certificate +# Authority (CA) like Let's Encrypt using the ACME protocol's HTTP-01 +# challenge. +# +# ref: https://docs.traefik.io/https/acme/#certificate-resolvers +certificatesResolvers: + default: + acme: + caServer: {{ .Values.proxy.https.letsencrypt.acmeServer }} + email: {{ .Values.proxy.https.letsencrypt.contactEmail }} + httpChallenge: + entryPoint: http + storage: /etc/acme/acme.json + +# Let Traefik listen to port 80 and port 443 +# +# ref: https://docs.traefik.io/routing/entrypoints/ +entryPoints: + # Port 80, used for: + # - ACME HTTP-01 challenges + # - Redirects to HTTPS + http: + address: ':8080' + # Port 443, used for: + # - TLS Termination Proxy, where HTTPS transitions to HTTP. + https: + address: ':8443' + # Configure a high idle timeout for our websockets connections + transport: + respondingTimeouts: + idleTimeout: 10m0s + +# Config of logs about what happens to Traefik itself (startup, +# configuration, events, shutdown, and so on). +# +# ref: https://docs.traefik.io/observability/logs +log: + level: {{ if .Values.debug.enabled -}} DEBUG {{- else -}} WARN {{- end }} + +# Let Traefik monitor another file we mount for dynamic configuration. As we +# mount this file through this configmap, we can make a `kubectl edit` on the +# configmap and have Traefik update on changes to dynamic.yaml. 
+providers: + file: + filename: /etc/traefik/dynamic.yaml +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/configmap.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/configmap.yaml new file mode 100644 index 000000000..1ba6e8c1e --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/configmap.yaml @@ -0,0 +1,29 @@ +{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) }} +{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) }} +{{- if $autoHTTPS -}} +{{- $_ := .Values.proxy.https.letsencrypt.contactEmail | required "proxy.https.letsencrypt.contactEmail is a required field" -}} + +# This configmap contains Traefik configuration files to be mounted. +# - traefik.yaml will only be read during startup (static configuration) +# - dynamic.yaml will be read on change (dynamic configuration) +# +# ref: https://docs.traefik.io/getting-started/configuration-overview/ +# +# The configuration files are first rendered with Helm templating to large YAML +# strings. Then we use the fromYAML function on these strings to get an object, +# that we in turn merge with user provided extra configuration. +# +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ include "jupyterhub.autohttps.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +data: + traefik.yaml: | + {{- include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.proxy.traefik.extraStaticConfig | toYaml | nindent 4 }} + dynamic.yaml: | + {{- include "jupyterhub.dynamic.yaml" . 
| fromYaml | merge .Values.proxy.traefik.extraDynamicConfig | toYaml | nindent 4 }} + +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/deployment.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/deployment.yaml new file mode 100644 index 000000000..644decd63 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/deployment.yaml @@ -0,0 +1,155 @@ +{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) }} +{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) }} +{{- if $autoHTTPS -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "jupyterhub.autohttps.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +spec: + {{- if not (typeIs "" .Values.proxy.traefik.revisionHistoryLimit) }} + revisionHistoryLimit: {{ .Values.proxy.traefik.revisionHistoryLimit }} + {{- end }} + replicas: 1 + selector: + matchLabels: + {{- include "jupyterhub.matchLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "jupyterhub.matchLabels" . | nindent 8 }} + hub.jupyter.org/network-access-proxy-http: "true" + {{- with .Values.proxy.traefik.labels }} + {{- . | toYaml | nindent 8 }} + {{- end }} + annotations: + # Only force a restart through a change to this checksum when the static + # configuration is changed, as the dynamic can be updated after start. + # Any disruptions to this deployment impacts everything, it is the + # entrypoint of all network traffic. + checksum/static-config: {{ include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.proxy.traefik.extraStaticConfig | toYaml | sha256sum }} + spec: + {{- with include "jupyterhub.autohttps-serviceaccount.fullname" . }} + serviceAccountName: {{ . 
}} + {{- end }} + {{- if .Values.scheduling.podPriority.enabled }} + priorityClassName: {{ include "jupyterhub.priority.fullname" . }} + {{- end }} + {{- with .Values.proxy.traefik.nodeSelector }} + nodeSelector: + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- with concat .Values.scheduling.corePods.tolerations .Values.proxy.traefik.tolerations }} + tolerations: + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- include "jupyterhub.coreAffinity" . | nindent 6 }} + volumes: + - name: certificates + emptyDir: {} + - name: traefik-config + configMap: + name: {{ include "jupyterhub.autohttps.fullname" . }} + {{- with .Values.proxy.traefik.extraVolumes }} + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.proxy.traefik.image) }} + imagePullSecrets: {{ . }} + {{- end }} + initContainers: + - name: load-acme + image: "{{ .Values.proxy.secretSync.image.name }}:{{ .Values.proxy.secretSync.image.tag }}" + {{- with .Values.proxy.secretSync.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + args: + - load + - {{ include "jupyterhub.proxy-public-tls.fullname" . }} + - acme.json + - /etc/acme/acme.json + env: + # We need this to get logs immediately + - name: PYTHONUNBUFFERED + value: "True" + {{- with .Values.proxy.traefik.extraEnv }} + {{- include "jupyterhub.extraEnv" . | nindent 12 }} + {{- end }} + volumeMounts: + - name: certificates + mountPath: /etc/acme + {{- with .Values.proxy.secretSync.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.proxy.traefik.extraInitContainers }} + {{- . | toYaml | nindent 8 }} + {{- end }} + containers: + - name: traefik + image: "{{ .Values.proxy.traefik.image.name }}:{{ .Values.proxy.traefik.image.tag }}" + {{- with .Values.proxy.traefik.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + {{- with .Values.proxy.traefik.resources }} + resources: + {{- . 
| toYaml | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: 8080 + - name: https + containerPort: 8443 + {{- with .Values.proxy.traefik.extraPorts }} + {{- . | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + - name: traefik-config + mountPath: /etc/traefik + - name: certificates + mountPath: /etc/acme + {{- with .Values.proxy.traefik.extraVolumeMounts }} + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.proxy.traefik.extraEnv }} + env: + {{- include "jupyterhub.extraEnv" . | nindent 12 }} + {{- end }} + {{- with .Values.proxy.traefik.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} + {{- end }} + - name: secret-sync + image: "{{ .Values.proxy.secretSync.image.name }}:{{ .Values.proxy.secretSync.image.tag }}" + {{- with .Values.proxy.secretSync.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + {{- with .Values.proxy.secretSync.resources }} + resources: + {{- . | toYaml | nindent 12 }} + {{- end }} + args: + - watch-save + - --label=app={{ include "jupyterhub.appLabel" . }} + - --label=release={{ .Release.Name }} + - --label=chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + - --label=heritage=secret-sync + - {{ include "jupyterhub.proxy-public-tls.fullname" . }} + - acme.json + - /etc/acme/acme.json + env: + # We need this to get logs immediately + - name: PYTHONUNBUFFERED + value: "True" + volumeMounts: + - name: certificates + mountPath: /etc/acme + {{- with .Values.proxy.secretSync.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.proxy.traefik.extraPodSpec }} + {{- . 
| toYaml | nindent 6 }} + {{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/netpol.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/netpol.yaml new file mode 100644 index 000000000..b3f5e6a62 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/netpol.yaml @@ -0,0 +1,79 @@ +{{- $HTTPS := .Values.proxy.https.enabled -}} +{{- $autoHTTPS := and $HTTPS (and (eq .Values.proxy.https.type "letsencrypt") .Values.proxy.https.hosts) -}} +{{- if and $autoHTTPS .Values.proxy.traefik.networkPolicy.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "jupyterhub.autohttps.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "jupyterhub.matchLabels" . | nindent 6 }} + policyTypes: + - Ingress + - Egress + + # IMPORTANT: + # NetworkPolicy's ingress "from" and egress "to" rule specifications require + # great attention to detail. A quick summary is: + # + # 1. You can provide "from"/"to" rules that provide access either ports or a + # subset of ports. + # 2. You can for each "from"/"to" rule provide any number of + # "sources"/"destinations" of four different kinds. 
+ # - podSelector - targets pods with a certain label in the same namespace as the NetworkPolicy + # - namespaceSelector - targets all pods running in namespaces with a certain label + # - namespaceSelector and podSelector - targets pods with a certain label running in namespaces with a certain label + # - ipBlock - targets network traffic from/to a set of IP address ranges + # + # Read more at: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors + # + ingress: + {{- with .Values.proxy.traefik.networkPolicy.allowedIngressPorts }} + # allow incoming traffic to these ports independent of source + - ports: + {{- range $port := . }} + - port: {{ $port }} + {{- end }} + {{- end }} + + # allowed pods (hub.jupyter.org/network-access-proxy-http) --> proxy (http/https port) + - ports: + - port: http + - port: https + from: + # source 1 - labeled pods + - podSelector: + matchLabels: + hub.jupyter.org/network-access-proxy-http: "true" + {{- if eq .Values.proxy.traefik.networkPolicy.interNamespaceAccessLabels "accept" }} + namespaceSelector: + matchLabels: {} # without this, the podSelector would only consider pods in the local namespace + # source 2 - pods in labeled namespaces + - namespaceSelector: + matchLabels: + hub.jupyter.org/network-access-proxy-http: "true" + {{- end }} + + {{- with .Values.proxy.traefik.networkPolicy.ingress}} + # depends, but default is nothing --> proxy + {{- . | toYaml | nindent 4 }} + {{- end }} + + egress: + # autohttps --> proxy (http port) + - to: + - podSelector: + matchLabels: + {{- $_ := merge (dict "componentLabel" "proxy") . }} + {{- include "jupyterhub.matchLabels" $_ | nindent 14 }} + ports: + - port: 8000 + + {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.proxy.traefik.networkPolicy)) }} + {{- . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/pdb.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/pdb.yaml new file mode 100644 index 000000000..e90c939b4 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/pdb.yaml @@ -0,0 +1,19 @@ +{{- if .Values.proxy.traefik.pdb.enabled -}} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: proxy + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +spec: + {{- if not (typeIs "" .Values.proxy.traefik.pdb.maxUnavailable) }} + maxUnavailable: {{ .Values.proxy.traefik.pdb.maxUnavailable }} + {{- end }} + {{- if not (typeIs "" .Values.proxy.traefik.pdb.minAvailable) }} + minAvailable: {{ .Values.proxy.traefik.pdb.minAvailable }} + {{- end }} + selector: + matchLabels: + {{- include "jupyterhub.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/rbac.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/rbac.yaml new file mode 100644 index 000000000..20b036ebb --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/rbac.yaml @@ -0,0 +1,37 @@ +{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) -}} +{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) -}} +{{- if $autoHTTPS -}} +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "jupyterhub.autohttps.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} + {{- with .Values.proxy.traefik.serviceAccount.annotations }} + annotations: + {{- . 
| toYaml | nindent 4 }} + {{- end }} +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "patch", "list", "create"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "jupyterhub.autohttps.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +subjects: +- kind: ServiceAccount + name: {{ include "jupyterhub.autohttps-serviceaccount.fullname" . }} + namespace: {{ .Values.namespace }} +roleRef: + kind: Role + name: {{ include "jupyterhub.autohttps.fullname" . }} + apiGroup: rbac.authorization.k8s.io +{{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/service.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/service.yaml new file mode 100644 index 000000000..e5bce1703 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/service.yaml @@ -0,0 +1,26 @@ +{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) }} +{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) }} +{{- if $autoHTTPS -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "jupyterhub.proxy-http.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} + {{- with .Values.proxy.service.labels }} + {{- . | toYaml | nindent 4 }} + {{- end }} + {{- with .Values.proxy.service.annotations }} + annotations: + {{- . | toYaml | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + selector: + {{- $_ := merge (dict "componentLabel" "proxy") . 
}} + {{- include "jupyterhub.matchLabels" $_ | nindent 4 }} + ports: + - port: 8000 + targetPort: http +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/serviceaccount.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/serviceaccount.yaml new file mode 100644 index 000000000..92619edb7 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/autohttps/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) -}} +{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) -}} +{{- if $autoHTTPS -}} +{{- if .Values.proxy.traefik.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "jupyterhub.autohttps-serviceaccount.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +{{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/deployment.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/deployment.yaml new file mode 100644 index 000000000..930bc8f78 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/deployment.yaml @@ -0,0 +1,179 @@ +{{- $manualHTTPS := and .Values.proxy.https.enabled (eq .Values.proxy.https.type "manual") -}} +{{- $manualHTTPSwithsecret := and .Values.proxy.https.enabled (eq .Values.proxy.https.type "secret") -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "jupyterhub.proxy.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . 
| nindent 4 }} +spec: + {{- if not (typeIs "" .Values.proxy.chp.revisionHistoryLimit) }} + revisionHistoryLimit: {{ .Values.proxy.chp.revisionHistoryLimit }} + {{- end }} + replicas: 1 + selector: + matchLabels: + {{- include "jupyterhub.matchLabels" . | nindent 6 }} + strategy: + {{- .Values.proxy.deploymentStrategy | toYaml | nindent 4 }} + template: + metadata: + labels: + {{- /* Changes here will cause the Deployment to restart the pods. */}} + {{- include "jupyterhub.matchLabels" . | nindent 8 }} + hub.jupyter.org/network-access-hub: "true" + hub.jupyter.org/network-access-singleuser: "true" + {{- with .Values.proxy.labels }} + {{- . | toYaml | nindent 8 }} + {{- end }} + annotations: + # We want to restart proxy only if the auth token changes + # Other changes to the hub config should not restart. + # We truncate to 4 chars to avoid leaking auth token info, + # since someone could brute force the hash to obtain the token + # + # Note that if auth_token has to be generated at random, it will be + # generated at random here separately from being generated at random in + # the k8s Secret template. This will cause this annotation to change to + # match the k8s Secret during the first upgrade following an auth_token + # was generated. + checksum/auth-token: {{ include "jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token" . | sha256sum | trunc 4 | quote }} + checksum/proxy-secret: {{ include (print $.Template.BasePath "/proxy/secret.yaml") . | sha256sum | quote }} + {{- with .Values.proxy.annotations }} + {{- . | toYaml | nindent 8 }} + {{- end }} + spec: + terminationGracePeriodSeconds: 60 + {{- if .Values.scheduling.podPriority.enabled }} + priorityClassName: {{ include "jupyterhub.priority.fullname" . }} + {{- end }} + {{- with .Values.proxy.chp.nodeSelector }} + nodeSelector: + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- with concat .Values.scheduling.corePods.tolerations .Values.proxy.chp.tolerations }} + tolerations: + {{- . 
| toYaml | nindent 8 }} + {{- end }} + {{- include "jupyterhub.coreAffinity" . | nindent 6 }} + {{- if $manualHTTPS }} + volumes: + - name: tls-secret + secret: + secretName: {{ include "jupyterhub.proxy-public-manual-tls.fullname" . }} + {{- else if $manualHTTPSwithsecret }} + volumes: + - name: tls-secret + secret: + secretName: {{ .Values.proxy.https.secret.name }} + {{- end }} + {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.proxy.chp.image) }} + imagePullSecrets: {{ . }} + {{- end }} + containers: + - name: chp + image: {{ .Values.proxy.chp.image.name }}:{{ .Values.proxy.chp.image.tag }} + {{- $hubNameAsEnv := include "jupyterhub.hub.fullname" . | upper | replace "-" "_" }} + {{- $hubHost := printf "http://%s:$(%s_SERVICE_PORT)" (include "jupyterhub.hub.fullname" .) $hubNameAsEnv }} + command: + - configurable-http-proxy + - "--ip=" + - "--api-ip=" + - --api-port=8001 + - --default-target={{ .Values.proxy.chp.defaultTarget | default $hubHost }} + - --error-target={{ .Values.proxy.chp.errorTarget | default (printf "%s/hub/error" $hubHost) }} + {{- if $manualHTTPS }} + - --port=8443 + - --redirect-port=8000 + - --redirect-to=443 + - --ssl-key=/etc/chp/tls/tls.key + - --ssl-cert=/etc/chp/tls/tls.crt + {{- else if $manualHTTPSwithsecret }} + - --port=8443 + - --redirect-port=8000 + - --redirect-to=443 + - --ssl-key=/etc/chp/tls/{{ .Values.proxy.https.secret.key }} + - --ssl-cert=/etc/chp/tls/{{ .Values.proxy.https.secret.crt }} + {{- else }} + - --port=8000 + {{- end }} + {{- if .Values.debug.enabled }} + - --log-level=debug + {{- end }} + {{- range .Values.proxy.chp.extraCommandLineFlags }} + - {{ tpl . $ }} + {{- end }} + {{- if or $manualHTTPS $manualHTTPSwithsecret }} + volumeMounts: + - name: tls-secret + mountPath: /etc/chp/tls + readOnly: true + {{- end }} + {{- with .Values.proxy.chp.resources }} + resources: + {{- . 
| toYaml | nindent 12 }} + {{- end }} + env: + - name: CONFIGPROXY_AUTH_TOKEN + valueFrom: + secretKeyRef: + # NOTE: References the chart managed k8s Secret even if + # hub.existingSecret is specified to avoid using the + # lookup function on the user managed k8s Secret. + name: {{ include "jupyterhub.hub.fullname" . }} + key: hub.config.ConfigurableHTTPProxy.auth_token + {{- with .Values.proxy.chp.extraEnv }} + {{- include "jupyterhub.extraEnv" . | nindent 12 }} + {{- end }} + {{- with .Values.proxy.chp.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + ports: + {{- if or $manualHTTPS $manualHTTPSwithsecret }} + - name: https + containerPort: 8443 + {{- end }} + - name: http + containerPort: 8000 + - name: api + containerPort: 8001 + {{- if .Values.proxy.chp.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.proxy.chp.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.proxy.chp.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.proxy.chp.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.proxy.chp.livenessProbe.failureThreshold }} + httpGet: + path: /_chp_healthz + {{- if or $manualHTTPS $manualHTTPSwithsecret }} + port: https + scheme: HTTPS + {{- else }} + port: http + scheme: HTTP + {{- end }} + {{- end }} + {{- if .Values.proxy.chp.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.proxy.chp.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.proxy.chp.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.proxy.chp.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.proxy.chp.readinessProbe.failureThreshold }} + httpGet: + path: /_chp_healthz + {{- if or $manualHTTPS $manualHTTPSwithsecret }} + port: https + scheme: HTTPS + {{- else }} + port: http + scheme: HTTP + {{- end }} + {{- end }} + {{- with .Values.proxy.chp.containerSecurityContext }} + securityContext: + {{- . 
| toYaml | nindent 12 }} + {{- end }} + {{- with .Values.proxy.chp.extraPodSpec }} + {{- . | toYaml | nindent 6 }} + {{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/netpol.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/netpol.yaml new file mode 100644 index 000000000..aa062e8ac --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/netpol.yaml @@ -0,0 +1,109 @@ +{{- $HTTPS := .Values.proxy.https.enabled -}} +{{- $autoHTTPS := and $HTTPS (and (eq .Values.proxy.https.type "letsencrypt") .Values.proxy.https.hosts) -}} +{{- $manualHTTPS := and $HTTPS (eq .Values.proxy.https.type "manual") -}} +{{- $manualHTTPSwithsecret := and $HTTPS (eq .Values.proxy.https.type "secret") -}} +{{- if .Values.proxy.chp.networkPolicy.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "jupyterhub.proxy.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "jupyterhub.matchLabels" . | nindent 6 }} + policyTypes: + - Ingress + - Egress + + # IMPORTANT: + # NetworkPolicy's ingress "from" and egress "to" rule specifications require + # great attention to detail. A quick summary is: + # + # 1. You can provide "from"/"to" rules that provide access either ports or a + # subset of ports. + # 2. You can for each "from"/"to" rule provide any number of + # "sources"/"destinations" of four different kinds. 
+ # - podSelector - targets pods with a certain label in the same namespace as the NetworkPolicy + # - namespaceSelector - targets all pods running in namespaces with a certain label + # - namespaceSelector and podSelector - targets pods with a certain label running in namespaces with a certain label + # - ipBlock - targets network traffic from/to a set of IP address ranges + # + # Read more at: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors + # + ingress: + {{- with .Values.proxy.chp.networkPolicy.allowedIngressPorts }} + # allow incoming traffic to these ports independent of source + - ports: + {{- range $port := . }} + - port: {{ $port }} + {{- end }} + {{- end }} + + # allowed pods (hub.jupyter.org/network-access-proxy-http) --> proxy (http/https port) + - ports: + - port: http + {{- if or $manualHTTPS $manualHTTPSwithsecret }} + - port: https + {{- end }} + from: + # source 1 - labeled pods + - podSelector: + matchLabels: + hub.jupyter.org/network-access-proxy-http: "true" + {{- if eq .Values.proxy.chp.networkPolicy.interNamespaceAccessLabels "accept" }} + namespaceSelector: + matchLabels: {} # without this, the podSelector would only consider pods in the local namespace + # source 2 - pods in labeled namespaces + - namespaceSelector: + matchLabels: + hub.jupyter.org/network-access-proxy-http: "true" + {{- end }} + + # allowed pods (hub.jupyter.org/network-access-proxy-api) --> proxy (api port) + - ports: + - port: api + from: + # source 1 - labeled pods + - podSelector: + matchLabels: + hub.jupyter.org/network-access-proxy-api: "true" + {{- if eq .Values.proxy.chp.networkPolicy.interNamespaceAccessLabels "accept" }} + namespaceSelector: + matchLabels: {} # without this, the podSelector would only consider pods in the local namespace + # source 2 - pods in labeled namespaces + - namespaceSelector: + matchLabels: + hub.jupyter.org/network-access-proxy-api: "true" + {{- end }} + + {{- with 
.Values.proxy.chp.networkPolicy.ingress}} + # depends, but default is nothing --> proxy + {{- . | toYaml | nindent 4 }} + {{- end }} + + egress: + # proxy --> hub + - to: + - podSelector: + matchLabels: + {{- $_ := merge (dict "componentLabel" "hub") . }} + {{- include "jupyterhub.matchLabels" $_ | nindent 14 }} + ports: + - port: 8081 + + # proxy --> singleuser-server + - to: + - podSelector: + matchLabels: + {{- $_ := merge (dict "componentLabel" "singleuser-server") . }} + {{- include "jupyterhub.matchLabels" $_ | nindent 14 }} + ports: + - port: 8888 + + {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.proxy.chp.networkPolicy)) }} + {{- . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/pdb.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/pdb.yaml new file mode 100644 index 000000000..cdb969ca4 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/pdb.yaml @@ -0,0 +1,19 @@ +{{- if .Values.proxy.chp.pdb.enabled -}} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "jupyterhub.proxy.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +spec: + {{- if not (typeIs "" .Values.proxy.chp.pdb.maxUnavailable) }} + maxUnavailable: {{ .Values.proxy.chp.pdb.maxUnavailable }} + {{- end }} + {{- if not (typeIs "" .Values.proxy.chp.pdb.minAvailable) }} + minAvailable: {{ .Values.proxy.chp.pdb.minAvailable }} + {{- end }} + selector: + matchLabels: + {{- include "jupyterhub.matchLabels" . 
| nindent 6 }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/secret.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/secret.yaml new file mode 100644 index 000000000..a2d022f75 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/secret.yaml @@ -0,0 +1,14 @@ +{{- $manualHTTPS := and .Values.proxy.https.enabled (eq .Values.proxy.https.type "manual") -}} +{{- if $manualHTTPS -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "jupyterhub.proxy-public-manual-tls.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +type: kubernetes.io/tls +data: + tls.crt: {{ .Values.proxy.https.manual.cert | required "Required configuration missing: proxy.https.manual.cert" | b64enc }} + tls.key: {{ .Values.proxy.https.manual.key | required "Required configuration missing: proxy.https.manual.key" | b64enc }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/service.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/service.yaml new file mode 100644 index 000000000..986a20457 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/proxy/service.yaml @@ -0,0 +1,85 @@ +{{- $enabled := .Values.proxy.https.enabled -}} +{{- $autoHTTPS := and $enabled (and (eq .Values.proxy.https.type "letsencrypt") .Values.proxy.https.hosts) -}} +{{- $manualHTTPS := and $enabled (eq .Values.proxy.https.type "manual") -}} +{{- $manualHTTPSwithsecret := and $enabled (eq .Values.proxy.https.type "secret") -}} +{{- $offloadHTTPS := and $enabled (eq .Values.proxy.https.type "offload") -}} +{{- $valid := or $autoHTTPS (or $manualHTTPS (or $manualHTTPSwithsecret $offloadHTTPS)) -}} +{{- $HTTPS := and $enabled $valid -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "jupyterhub.proxy-api.fullname" . 
}} + namespace: {{ .Values.namespace }} + labels: + {{- $_ := merge (dict "componentSuffix" "-api") . }} + {{- include "jupyterhub.labels" $_ | nindent 4 }} +spec: + selector: + {{- include "jupyterhub.matchLabels" . | nindent 4 }} + ports: + - port: 8001 + targetPort: api +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "jupyterhub.proxy-public.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- $_ := merge (dict "componentSuffix" "-public") . }} + {{- include "jupyterhub.labels" $_ | nindent 4 }} + {{- with .Values.proxy.service.labels }} + {{- . | toYaml | nindent 4 }} + {{- end }} + {{- with .Values.proxy.service.annotations }} + annotations: + {{- . | toYaml | nindent 4 }} + {{- end }} +spec: + selector: + # This service will target the autohttps pod if autohttps is configured, and + # the proxy pod if not. When autohttps is configured, the service proxy-http + # will be around to target the proxy pod directly. + {{- if $autoHTTPS }} + {{- $_ := merge (dict "componentLabel" "autohttps") . -}} + {{- include "jupyterhub.matchLabels" $_ | nindent 4 }} + {{- else }} + {{- include "jupyterhub.matchLabels" . | nindent 4 }} + {{- end }} + ports: + {{- if $HTTPS }} + - name: https + port: 443 + # When HTTPS termination is handled outside our helm chart, pass traffic + # coming in via this Service's port 443 to targeted pod's port meant for + # HTTP traffic. + {{- if $offloadHTTPS }} + targetPort: http + {{- else }} + targetPort: https + {{- end }} + {{- with .Values.proxy.service.nodePorts.https }} + nodePort: {{ . }} + {{- end }} + {{- end }} + {{- if ne .Values.proxy.service.disableHttpPort true }} + - name: http + port: 80 + targetPort: http + {{- with .Values.proxy.service.nodePorts.http }} + nodePort: {{ . }} + {{- end }} + {{- end }} + {{- with .Values.proxy.service.extraPorts }} + {{- . 
| toYaml | nindent 4 }} + {{- end }} + type: {{ .Values.proxy.service.type }} + {{- with .Values.proxy.service.loadBalancerIP }} + loadBalancerIP: {{ . }} + {{- end }} + {{- if eq .Values.proxy.service.type "LoadBalancer" }} + {{- with .Values.proxy.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- . | toYaml | nindent 4 }} + {{- end }} + {{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/_scheduling-helpers.tpl b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/_scheduling-helpers.tpl new file mode 100644 index 000000000..0a1a74149 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/_scheduling-helpers.tpl @@ -0,0 +1,138 @@ +{{- define "jupyterhub.userNodeAffinityRequired" -}} +{{- if eq .Values.scheduling.userPods.nodeAffinity.matchNodePurpose "require" -}} +- matchExpressions: + - key: hub.jupyter.org/node-purpose + operator: In + values: [user] +{{- end }} +{{- with .Values.singleuser.extraNodeAffinity.required }} +{{- . | toYaml | nindent 0 }} +{{- end }} +{{- end }} + +{{- define "jupyterhub.userNodeAffinityPreferred" -}} +{{- if eq .Values.scheduling.userPods.nodeAffinity.matchNodePurpose "prefer" -}} +- weight: 100 + preference: + matchExpressions: + - key: hub.jupyter.org/node-purpose + operator: In + values: [user] +{{- end }} +{{- with .Values.singleuser.extraNodeAffinity.preferred }} +{{- . | toYaml | nindent 0 }} +{{- end }} +{{- end }} + +{{- define "jupyterhub.userPodAffinityRequired" -}} +{{- with .Values.singleuser.extraPodAffinity.required -}} +{{ . | toYaml }} +{{- end }} +{{- end }} + +{{- define "jupyterhub.userPodAffinityPreferred" -}} +{{- with .Values.singleuser.extraPodAffinity.preferred -}} +{{ . | toYaml }} +{{- end }} +{{- end }} + +{{- define "jupyterhub.userPodAntiAffinityRequired" -}} +{{- with .Values.singleuser.extraPodAntiAffinity.required -}} +{{ . 
| toYaml }} +{{- end }} +{{- end }} + +{{- define "jupyterhub.userPodAntiAffinityPreferred" -}} +{{- with .Values.singleuser.extraPodAntiAffinity.preferred -}} +{{ . | toYaml }} +{{- end }} +{{- end }} + + + +{{- /* + jupyterhub.userAffinity: + It is used by user-placeholder to set the same affinity on them as the + spawned user pods spawned by kubespawner. +*/}} +{{- define "jupyterhub.userAffinity" -}} + +{{- $dummy := set . "nodeAffinityRequired" (include "jupyterhub.userNodeAffinityRequired" .) -}} +{{- $dummy := set . "podAffinityRequired" (include "jupyterhub.userPodAffinityRequired" .) -}} +{{- $dummy := set . "podAntiAffinityRequired" (include "jupyterhub.userPodAntiAffinityRequired" .) -}} +{{- $dummy := set . "nodeAffinityPreferred" (include "jupyterhub.userNodeAffinityPreferred" .) -}} +{{- $dummy := set . "podAffinityPreferred" (include "jupyterhub.userPodAffinityPreferred" .) -}} +{{- $dummy := set . "podAntiAffinityPreferred" (include "jupyterhub.userPodAntiAffinityPreferred" .) -}} +{{- $dummy := set . "hasNodeAffinity" (or .nodeAffinityRequired .nodeAffinityPreferred) -}} +{{- $dummy := set . "hasPodAffinity" (or .podAffinityRequired .podAffinityPreferred) -}} +{{- $dummy := set . 
"hasPodAntiAffinity" (or .podAntiAffinityRequired .podAntiAffinityPreferred) -}} + +{{- if .hasNodeAffinity -}} +nodeAffinity: + {{- if .nodeAffinityRequired }} + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + {{- .nodeAffinityRequired | nindent 6 }} + {{- end }} + + {{- if .nodeAffinityPreferred }} + preferredDuringSchedulingIgnoredDuringExecution: + {{- .nodeAffinityPreferred | nindent 4 }} + {{- end }} +{{- end }} + +{{- if .hasPodAffinity }} +podAffinity: + {{- if .podAffinityRequired }} + requiredDuringSchedulingIgnoredDuringExecution: + {{- .podAffinityRequired | nindent 4 }} + {{- end }} + + {{- if .podAffinityPreferred }} + preferredDuringSchedulingIgnoredDuringExecution: + {{- .podAffinityPreferred | nindent 4 }} + {{- end }} +{{- end }} + +{{- if .hasPodAntiAffinity }} +podAntiAffinity: + {{- if .podAntiAffinityRequired }} + requiredDuringSchedulingIgnoredDuringExecution: + {{- .podAntiAffinityRequired | nindent 4 }} + {{- end }} + + {{- if .podAntiAffinityPreferred }} + preferredDuringSchedulingIgnoredDuringExecution: + {{- .podAntiAffinityPreferred | nindent 4 }} + {{- end }} +{{- end }} + +{{- end }} + + + +{{- define "jupyterhub.coreAffinity" -}} +{{- $require := eq .Values.scheduling.corePods.nodeAffinity.matchNodePurpose "require" -}} +{{- $prefer := eq .Values.scheduling.corePods.nodeAffinity.matchNodePurpose "prefer" -}} +{{- if or $require $prefer -}} +affinity: + nodeAffinity: + {{- if $require }} + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: hub.jupyter.org/node-purpose + operator: In + values: [core] + {{- end }} + {{- if $prefer }} + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: hub.jupyter.org/node-purpose + operator: In + values: [core] + {{- end }} +{{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/priorityclass.yaml 
b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/priorityclass.yaml
new file mode 100644
index 000000000..2fa24d250
--- /dev/null
+++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/priorityclass.yaml
@@ -0,0 +1,16 @@
+{{- if .Values.scheduling.podPriority.enabled }}
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: {{ include "jupyterhub.priority.fullname" . }}
+  namespace: {{ .Values.namespace }}
+  annotations:
+    meta.helm.sh/release-name: "{{ .Release.Name }}"
+    meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
+  labels:
+    {{- $_ := merge (dict "componentLabel" "default-priority") . }}
+    {{- include "jupyterhub.labels" $_ | nindent 4 }}
+value: {{ .Values.scheduling.podPriority.defaultPriority }}
+globalDefault: {{ .Values.scheduling.podPriority.globalDefault }}
+description: "A default priority higher than user placeholders priority."
+{{- end }}
diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-placeholder/pdb.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-placeholder/pdb.yaml
new file mode 100644
index 000000000..2fede0afb
--- /dev/null
+++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-placeholder/pdb.yaml
@@ -0,0 +1,18 @@
+{{- /*
+The cluster autoscaler should be allowed to evict and reschedule these pods if
+it would help in order to scale down a node.
+*/}}
+{{- if .Values.scheduling.userPlaceholder.enabled -}}
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ include "jupyterhub.user-placeholder.fullname" . }}
+  namespace: {{ .Values.namespace }}
+  labels:
+    {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+  minAvailable: 0
+  selector:
+    matchLabels:
+      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+{{- end }}
diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-placeholder/priorityclass.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-placeholder/priorityclass.yaml
new file mode 100644
index 000000000..f30e55f7d
--- /dev/null
+++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-placeholder/priorityclass.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.scheduling.podPriority.enabled }}
+{{- if .Values.scheduling.userPlaceholder.enabled -}}
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
+  namespace: {{ .Values.namespace }}
+  annotations:
+    meta.helm.sh/release-name: "{{ .Release.Name }}"
+    meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
+  labels:
+    {{- include "jupyterhub.labels" . | nindent 4 }}
+value: {{ .Values.scheduling.podPriority.userPlaceholderPriority }}
+globalDefault: false
+description: "With a priority higher or equal to a cluster autoscalers priority cutoff, a pod can trigger a cluster scale up. At the same time, placeholder pods priority should be lower than other pods to make them evictable."
+{{- end }}
+{{- end }}
diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-placeholder/statefulset.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-placeholder/statefulset.yaml
new file mode 100644
index 000000000..3e250f344
--- /dev/null
+++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-placeholder/statefulset.yaml
@@ -0,0 +1,81 @@
+
+{{- /*
+These user-placeholder pods can be used to test cluster autoscaling in a
+controlled fashion.
+
+Example:
+$ echo 'Simulating four users...'
+$ kubectl scale sts/user-placeholder --replicas 4 +*/}} +{{- if .Values.scheduling.userPlaceholder.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "jupyterhub.user-placeholder.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +spec: + podManagementPolicy: Parallel + {{- if not (typeIs "" .Values.scheduling.userPlaceholder.revisionHistoryLimit) }} + revisionHistoryLimit: {{ .Values.scheduling.userPlaceholder.revisionHistoryLimit }} + {{- end }} + replicas: {{ .Values.scheduling.userPlaceholder.replicas }} + selector: + matchLabels: + {{- include "jupyterhub.matchLabels" . | nindent 6 }} + serviceName: {{ include "jupyterhub.user-placeholder.fullname" . }} + template: + metadata: + {{- with .Values.scheduling.userPlaceholder.annotations }} + annotations: + {{- . | toYaml | nindent 8 }} + {{- end }} + labels: + {{- /* Changes here will cause the Deployment to restart the pods. */}} + {{- include "jupyterhub.matchLabels" . | nindent 8 }} + {{- with .Values.scheduling.userPlaceholder.labels }} + {{- . | toYaml | nindent 8 }} + {{- end }} + spec: + {{- if .Values.scheduling.podPriority.enabled }} + priorityClassName: {{ include "jupyterhub.user-placeholder-priority.fullname" . }} + {{- end }} + {{- if .Values.scheduling.userScheduler.enabled }} + schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }} + {{- end }} + {{- with .Values.singleuser.nodeSelector }} + nodeSelector: + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- with concat .Values.scheduling.userPods.tolerations .Values.singleuser.extraTolerations }} + tolerations: + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- if include "jupyterhub.userAffinity" . }} + affinity: + {{- include "jupyterhub.userAffinity" . | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: 0 + automountServiceAccountToken: false + {{- with include "jupyterhub.imagePullSecrets" (dict "root" . 
"image" .Values.scheduling.userPlaceholder.image) }} + imagePullSecrets: {{ . }} + {{- end }} + containers: + - name: pause + image: {{ .Values.scheduling.userPlaceholder.image.name }}:{{ .Values.scheduling.userPlaceholder.image.tag }} + {{- if .Values.scheduling.userPlaceholder.resources }} + resources: + {{- .Values.scheduling.userPlaceholder.resources | toYaml | nindent 12 }} + {{- else if (include "jupyterhub.singleuser.resources" .) }} + resources: + {{- include "jupyterhub.singleuser.resources" . | nindent 12 }} + {{- end }} + {{- with .Values.scheduling.userPlaceholder.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + {{- with .Values.scheduling.userPlaceholder.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} + {{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/configmap.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/configmap.yaml new file mode 100644 index 000000000..6e4daa626 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/configmap.yaml @@ -0,0 +1,46 @@ +{{- if .Values.scheduling.userScheduler.enabled -}} +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +data: + {{- /* + This is configuration of a k8s official kube-scheduler binary running in the + user-scheduler. 
+ + The config version and kube-scheduler binary version has a fallback for k8s + clusters versioned v1.23 or lower because: + + - v1 / v1beta3 config requires kube-scheduler binary >=1.25 / >=1.23 + - kube-scheduler binary >=1.25 requires storage.k8s.io/v1/CSIStorageCapacity + available first in k8s >=1.24 + + ref: https://kubernetes.io/docs/reference/scheduling/config/ + ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1/ + ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1beta3/ + */}} + config.yaml: | + {{- if semverCompare ">=1.24.0-0" .Capabilities.KubeVersion.Version }} + apiVersion: kubescheduler.config.k8s.io/v1 + {{- else }} + apiVersion: kubescheduler.config.k8s.io/v1beta3 + {{- end }} + kind: KubeSchedulerConfiguration + leaderElection: + resourceLock: endpointsleases + resourceName: {{ include "jupyterhub.user-scheduler-lock.fullname" . }} + resourceNamespace: "{{ .Release.Namespace }}" + profiles: + - schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }} + {{- with .Values.scheduling.userScheduler.plugins }} + plugins: + {{- . | toYaml | nindent 10 }} + {{- end }} + {{- with .Values.scheduling.userScheduler.pluginConfig }} + pluginConfig: + {{- . | toYaml | nindent 10 }} + {{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/deployment.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/deployment.yaml new file mode 100644 index 000000000..ebe15a445 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/deployment.yaml @@ -0,0 +1,103 @@ +{{- if .Values.scheduling.userScheduler.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . 
| nindent 4 }} +spec: + {{- if not (typeIs "" .Values.scheduling.userScheduler.revisionHistoryLimit) }} + revisionHistoryLimit: {{ .Values.scheduling.userScheduler.revisionHistoryLimit }} + {{- end }} + replicas: {{ .Values.scheduling.userScheduler.replicas }} + selector: + matchLabels: + {{- include "jupyterhub.matchLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "jupyterhub.matchLabels" . | nindent 8 }} + {{- with .Values.scheduling.userScheduler.labels }} + {{- . | toYaml | nindent 8 }} + {{- end }} + annotations: + checksum/config-map: {{ include (print $.Template.BasePath "/scheduling/user-scheduler/configmap.yaml") . | sha256sum }} + {{- with .Values.scheduling.userScheduler.annotations }} + {{- . | toYaml | nindent 8 }} + {{- end }} + spec: + {{ with include "jupyterhub.user-scheduler-serviceaccount.fullname" . }} + serviceAccountName: {{ . }} + {{- end }} + {{- if .Values.scheduling.podPriority.enabled }} + priorityClassName: {{ include "jupyterhub.priority.fullname" . }} + {{- end }} + {{- with .Values.scheduling.userScheduler.nodeSelector }} + nodeSelector: + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- with concat .Values.scheduling.corePods.tolerations .Values.scheduling.userScheduler.tolerations }} + tolerations: + {{- . | toYaml | nindent 8 }} + {{- end }} + {{- include "jupyterhub.coreAffinity" . | nindent 6 }} + volumes: + - name: config + configMap: + name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }} + {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.scheduling.userScheduler.image) }} + imagePullSecrets: {{ . 
}} + {{- end }} + containers: + - name: kube-scheduler + {{- if semverCompare ">=1.24.0-0" .Capabilities.KubeVersion.Version }} + image: {{ .Values.scheduling.userScheduler.image.name }}:{{ .Values.scheduling.userScheduler.image.tag }} + {{- else }} + # WARNING: The tag of this image is hardcoded, and the + # "scheduling.userScheduler.image.tag" configuration of the + # Helm chart that generated this resource manifest isn't + # respected. If you install the Helm chart in a k8s cluster + # versioned 1.24 or higher, your configuration will be + # respected. + image: {{ .Values.scheduling.userScheduler.image.name }}:v1.23.14 + {{- end }} + {{- with .Values.scheduling.userScheduler.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + command: + - /usr/local/bin/kube-scheduler + # NOTE: --authentication-skip-lookup=true is used to avoid a + # seemingly harmless error, if we need to not skip + # "authentication lookup" in the future, see the linked issue. + # + # ref: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1894 + - --config=/etc/user-scheduler/config.yaml + - --authentication-skip-lookup=true + - --v={{ .Values.scheduling.userScheduler.logLevel }} + volumeMounts: + - mountPath: /etc/user-scheduler + name: config + livenessProbe: + httpGet: + path: /healthz + scheme: HTTPS + port: 10259 + initialDelaySeconds: 15 + readinessProbe: + httpGet: + path: /healthz + scheme: HTTPS + port: 10259 + {{- with .Values.scheduling.userScheduler.resources }} + resources: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.scheduling.userScheduler.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.scheduling.userScheduler.extraPodSpec }} + {{- . 
| toYaml | nindent 6 }} + {{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/pdb.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/pdb.yaml new file mode 100644 index 000000000..eedb67dc5 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/pdb.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.scheduling.userScheduler.enabled .Values.scheduling.userScheduler.pdb.enabled -}} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +spec: + {{- if not (typeIs "" .Values.scheduling.userScheduler.pdb.maxUnavailable) }} + maxUnavailable: {{ .Values.scheduling.userScheduler.pdb.maxUnavailable }} + {{- end }} + {{- if not (typeIs "" .Values.scheduling.userScheduler.pdb.minAvailable) }} + minAvailable: {{ .Values.scheduling.userScheduler.pdb.minAvailable }} + {{- end }} + selector: + matchLabels: + {{- include "jupyterhub.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/rbac.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/rbac.yaml new file mode 100644 index 000000000..a9ce7aad4 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/rbac.yaml @@ -0,0 +1,234 @@ +{{- if .Values.scheduling.userScheduler.enabled -}} +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "jupyterhub.user-scheduler.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . 
| nindent 4 }} +rules: + # Copied from the system:kube-scheduler ClusterRole of the k8s version + # matching the kube-scheduler binary we use. A modification has been made to + # resourceName fields to remain relevant for how we have named our resources + # in this Helm chart. + # + # NOTE: These rules have been: + # - unchanged between 1.12 and 1.15 + # - changed in 1.16 + # - changed in 1.17 + # - unchanged between 1.18 and 1.20 + # - changed in 1.21: get/list/watch permission for namespace, + # csidrivers, csistoragecapacities was added. + # - unchanged between 1.22 and 1.27 + # + # ref: https://github.com/kubernetes/kubernetes/blob/v1.27.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L736-L892 + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - coordination.k8s.io + resourceNames: + - {{ include "jupyterhub.user-scheduler-lock.fullname" . }} + resources: + - leases + verbs: + - get + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - apiGroups: + - "" + resourceNames: + - {{ include "jupyterhub.user-scheduler-lock.fullname" . 
}} + resources: + - endpoints + verbs: + - get + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - delete + - get + - list + - watch + - apiGroups: + - "" + resources: + - bindings + - pods/binding + verbs: + - create + - apiGroups: + - "" + resources: + - pods/status + verbs: + - patch + - update + - apiGroups: + - "" + resources: + - replicationcontrollers + - services + verbs: + - get + - list + - watch + - apiGroups: + - apps + - extensions + resources: + - replicasets + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - statefulsets + verbs: + - get + - list + - watch + - apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + - persistentvolumes + verbs: + - get + - list + - watch + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + - apiGroups: + - storage.k8s.io + resources: + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - storage.k8s.io + resources: + - csidrivers + verbs: + - get + - list + - watch + - apiGroups: + - storage.k8s.io + resources: + - csistoragecapacities + verbs: + - get + - list + - watch + + # Copied from the system:volume-scheduler ClusterRole of the k8s version + # matching the kube-scheduler binary we use. + # + # NOTE: These rules have not changed between 1.12 and 1.27. 
+ # + # ref: https://github.com/kubernetes/kubernetes/blob/v1.27.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1311-L1338 + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - update + - watch + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - patch + - update + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "jupyterhub.user-scheduler.fullname" . }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }} + namespace: {{ .Values.namespace }} +roleRef: + kind: ClusterRole + name: {{ include "jupyterhub.user-scheduler.fullname" . }} + apiGroup: rbac.authorization.k8s.io +{{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/serviceaccount.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/serviceaccount.yaml new file mode 100644 index 000000000..28d1c8712 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/scheduling/user-scheduler/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.scheduling.userScheduler.enabled -}} +{{- if .Values.scheduling.userScheduler.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} + {{- with .Values.scheduling.userScheduler.serviceAccount.annotations }} + annotations: + {{- . 
| toYaml | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/singleuser/netpol.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/singleuser/netpol.yaml new file mode 100644 index 000000000..f388b81d3 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/singleuser/netpol.yaml @@ -0,0 +1,99 @@ +{{- if and .Values.singleuser.networkPolicy.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "jupyterhub.singleuser.fullname" . }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- $_ := merge (dict "componentLabel" "singleuser-server") . }} + {{- include "jupyterhub.matchLabels" $_ | nindent 6 }} + policyTypes: + - Ingress + - Egress + + # IMPORTANT: + # NetworkPolicy's ingress "from" and egress "to" rule specifications require + # great attention to detail. A quick summary is: + # + # 1. You can provide "from"/"to" rules that provide access either ports or a + # subset of ports. + # 2. You can for each "from"/"to" rule provide any number of + # "sources"/"destinations" of four different kinds. + # - podSelector - targets pods with a certain label in the same namespace as the NetworkPolicy + # - namespaceSelector - targets all pods running in namespaces with a certain label + # - namespaceSelector and podSelector - targets pods with a certain label running in namespaces with a certain label + # - ipBlock - targets network traffic from/to a set of IP address ranges + # + # Read more at: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors + # + ingress: + {{- with .Values.singleuser.networkPolicy.allowedIngressPorts }} + # allow incoming traffic to these ports independent of source + - ports: + {{- range $port := . 
}} + - port: {{ $port }} + {{- end }} + {{- end }} + + # allowed pods (hub.jupyter.org/network-access-singleuser) --> singleuser-server + - ports: + - port: notebook-port + from: + # source 1 - labeled pods + - podSelector: + matchLabels: + hub.jupyter.org/network-access-singleuser: "true" + {{- if eq .Values.singleuser.networkPolicy.interNamespaceAccessLabels "accept" }} + namespaceSelector: + matchLabels: {} # without this, the podSelector would only consider pods in the local namespace + # source 2 - pods in labeled namespaces + - namespaceSelector: + matchLabels: + hub.jupyter.org/network-access-singleuser: "true" + {{- end }} + + {{- with .Values.singleuser.networkPolicy.ingress }} + # depends, but default is nothing --> singleuser-server + {{- . | toYaml | nindent 4 }} + {{- end }} + + egress: + # singleuser-server --> hub + - to: + - podSelector: + matchLabels: + {{- $_ := merge (dict "componentLabel" "hub") . }} + {{- include "jupyterhub.matchLabels" $_ | nindent 14 }} + ports: + - port: 8081 + + # singleuser-server --> proxy + # singleuser-server --> autohttps + # + # While not critical for core functionality, a user or library code may rely + # on communicating with the proxy or autohttps pods via a k8s Service it can + # detected from well known environment variables. + # + - to: + - podSelector: + matchLabels: + {{- $_ := merge (dict "componentLabel" "proxy") . }} + {{- include "jupyterhub.matchLabels" $_ | nindent 14 }} + ports: + - port: 8000 + - to: + - podSelector: + matchLabels: + {{- $_ := merge (dict "componentLabel" "autohttps") . }} + {{- include "jupyterhub.matchLabels" $_ | nindent 14 }} + ports: + - port: 8080 + - port: 8443 + + {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.singleuser.networkPolicy)) }} + {{- . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/singleuser/secret.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/singleuser/secret.yaml new file mode 100644 index 000000000..15873ae44 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/templates/singleuser/secret.yaml @@ -0,0 +1,18 @@ +{{- if .Values.singleuser.extraFiles }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ include "jupyterhub.singleuser.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} +type: Opaque +{{- with include "jupyterhub.extraFiles.data" .Values.singleuser.extraFiles }} +data: + {{- . | nindent 2 }} +{{- end }} +{{- with include "jupyterhub.extraFiles.stringData" .Values.singleuser.extraFiles }} +stringData: + {{- . | nindent 2 }} +{{- end }} +{{- end }} diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/values.schema.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/values.schema.yaml new file mode 100644 index 000000000..1375a536b --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/values.schema.yaml @@ -0,0 +1,3014 @@ +# This schema (a jsonschema in YAML format) is used to generate +# values.schema.json which is packaged with the Helm chart for client side +# validation by helm of values before template rendering. +# +# This schema is also used by our documentation system to build the +# configuration reference section based on the description fields. See +# docs/source/conf.py for that logic! +# +# We look to document everything we have default values for in values.yaml, but +# we don't look to enforce the perfect validation logic within this file. 
+#
+# ref: https://json-schema.org/learn/getting-started-step-by-step.html
+#
+$schema: http://json-schema.org/draft-07/schema#
+type: object
+additionalProperties: false
+required:
+ - imagePullSecrets
+ - hub
+ - proxy
+ - singleuser
+ - ingress
+ - prePuller
+ - custom
+ - cull
+ - debug
+ - rbac
+ - global
+properties:
+ enabled:
+ type: [boolean, "null"]
+ description: |
+ `enabled` is ignored by the jupyterhub chart itself, but a chart depending
+ on the jupyterhub chart conditionally can make use of this config option as
+ the condition.
+ fullnameOverride:
+ type: [string, "null"]
+ description: |
+ fullnameOverride and nameOverride allow you to adjust how the resources
+ part of the Helm chart are named.
+
+ Name format | Resource types | fullnameOverride | nameOverride | Note
+ ------------------------- | -------------- | ---------------- | ------------ | -
+ component | namespaced | `""` | * | Default
+ release-component | cluster wide | `""` | * | Default
+ fullname-component | * | str | * | -
+ release-component | * | null | `""` | -
+ release-(name-)component | * | null | str | omitted if contained in release
+ release-(chart-)component | * | null | null | omitted if contained in release
+
+ ```{admonition} Warning!
+ :class: warning
+ Changing fullnameOverride or nameOverride after the initial installation
+ of the chart isn't supported. Changing their values likely leads to a
+ reset of non-external JupyterHub databases, abandonment of users' storage,
+ and severed couplings to currently running user pods.
+ ```
+
+ If you are a developer of a chart depending on this chart, you should
+ avoid hardcoding names. If you want to reference the name of a resource in
+ this chart from a parent helm chart's template, you can make use of the
+ global named templates instead.
+
+ ```yaml
+ # some pod definition of a parent chart helm template
+ schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . 
}} + ``` + + To access them from a container, you can also rely on the hub ConfigMap + that contains entries of all the resource names. + + ```yaml + # some container definition in a parent chart helm template + env: + - name: SCHEDULER_NAME + valueFrom: + configMapKeyRef: + name: {{ include "jupyterhub.user-scheduler.fullname" . }} + key: user-scheduler + ``` + + nameOverride: + type: [string, "null"] + description: | + See the documentation under [`fullnameOverride`](schema_fullnameOverride). + + imagePullSecret: + type: object + required: [create] + if: + properties: + create: + const: true + then: + additionalProperties: false + required: [registry, username, password] + description: | + This is configuration to create a k8s Secret resource of `type: + kubernetes.io/dockerconfigjson`, with credentials to pull images from a + private image registry. If you opt to do so, it will be available for use + by all pods in their respective `spec.imagePullSecrets` alongside other + k8s Secrets defined in `imagePullSecrets` or the pod respective + `...image.pullSecrets` configuration. + + In other words, using this configuration option can automate both the + otherwise manual creation of a k8s Secret and the otherwise manual + configuration to reference this k8s Secret in all the pods of the Helm + chart. + + ```sh + # you won't need to create a k8s Secret manually... + kubectl create secret docker-registry image-pull-secret \ + --docker-server= \ + --docker-username= \ + --docker-email= \ + --docker-password= + ``` + + If you just want to let all Pods reference an existing secret, use the + [`imagePullSecrets`](schema_imagePullSecrets) configuration instead. + properties: + create: + type: boolean + description: | + Toggle the creation of the k8s Secret with provided credentials to + access a private image registry. 
+ automaticReferenceInjection: + type: boolean + description: | + Toggle the automatic reference injection of the created Secret to all + pods' `spec.imagePullSecrets` configuration. + registry: + type: string + description: | + Name of the private registry you want to create a credential set for. + It will default to Docker Hub's image registry. + + Examples: + - https://index.docker.io/v1/ + - quay.io + - eu.gcr.io + - alexmorreale.privatereg.net + username: + type: string + description: | + Name of the user you want to use to connect to your private registry. + + For external gcr.io, you will use the `_json_key`. + + Examples: + - alexmorreale + - alex@pfc.com + - _json_key + password: + type: string + description: | + Password for the private image registry's user. + + Examples: + - plaintextpassword + - abc123SECRETzyx098 + + For gcr.io registries the password will be a big JSON blob for a + Google cloud service account, it should look something like below. + + ```yaml + password: |- + { + "type": "service_account", + "project_id": "jupyter-se", + "private_key_id": "f2ba09118a8d3123b3321bd9a7d6d0d9dc6fdb85", + ... + } + ``` + email: + type: [string, "null"] + description: | + Specification of an email is most often not required, but it is + supported. + + imagePullSecrets: + type: array + description: | + Chart wide configuration to _append_ k8s Secret references to all its + pod's `spec.imagePullSecrets` configuration. + + This will not override or get overridden by pod specific configuration, + but instead augment the pod specific configuration. + + You can use both the k8s native syntax, where each list element is like + `{"name": "my-secret-name"}`, or you can let list elements be strings + naming the secrets directly. 
+
+ hub:
+ type: object
+ additionalProperties: false
+ required: [baseUrl]
+ properties:
+ revisionHistoryLimit: &revisionHistoryLimit
+ type: [integer, "null"]
+ minimum: 0
+ description: |
+ Configures the resource's `spec.revisionHistoryLimit`. This is
+ available for Deployment, StatefulSet, and DaemonSet resources.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#revision-history-limit)
+ for more info.
+ config:
+ type: object
+ additionalProperties: true
+ description: |
+ JupyterHub and its components (authenticators, spawners, etc), are
+ Python classes that expose its configuration through
+ [_traitlets_](https://traitlets.readthedocs.io/en/stable/). With this
+ Helm chart configuration (`hub.config`), you can directly configure
+ the Python classes through _static_ YAML values. To _dynamically_ set
+ values, you need to use [`hub.extraConfig`](schema_hub.extraConfig)
+ instead.
+
+ ```{admonition} Currently intended only for auth config
+ :class: warning
+ This config _currently_ (0.11.0) only influences the software in the
+ `hub` Pod, but some Helm chart config options such as
+ [`hub.baseUrl`](schema_hub.baseUrl) is used to set
+ `JupyterHub.base_url` in the `hub` Pod _and_ influence how other Helm
+ templates are rendered.
+
+ As we have not yet mapped out all the potential configuration
+ conflicts except for the authentication related configuration options,
+ please accept that using it for something else at this point can lead
+ to issues. 
+ ``` + + __Example__ + + If you inspect documentation or some `jupyterhub_config.py` to contain + the following section: + + ```python + c.JupyterHub.admin_access = true + c.JupyterHub.admin_users = ["jovyan1", "jovyan2"] + c.KubeSpawner.k8s_api_request_timeout = 10 + c.GitHubOAuthenticator.allowed_organizations = ["jupyterhub"] + ``` + + Then, you would be able to represent it with this configuration like: + + ```yaml + hub: + config: + JupyterHub: + admin_access: true + admin_users: + - jovyan1 + - jovyan2 + KubeSpawner: + k8s_api_request_timeout: 10 + GitHubOAuthenticator: + allowed_organizations: + - jupyterhub + ``` + + ```{admonition} YAML limitations + :class: tip + You can't represent Python `Bytes` or `Set` objects in YAML directly. + ``` + + ```{admonition} Helm value merging + :class: tip + `helm` merges a Helm chart's default values with values passed with + the `--values` or `-f` flag. During merging, lists are replaced while + dictionaries are updated. + ``` + extraFiles: &extraFiles + type: object + additionalProperties: false + description: | + A dictionary with extra files to be injected into the pod's container + on startup. This can for example be used to inject: configuration + files, custom user interface templates, images, and more. + + ```yaml + # NOTE: "hub" is used in this example, but the configuration is the + # same for "singleuser". + hub: + extraFiles: + # The file key is just a reference that doesn't influence the + # actual file name. + : + # mountPath is required and must be the absolute file path. + mountPath: + + # Choose one out of the three ways to represent the actual file + # content: data, stringData, or binaryData. + # + # data should be set to a mapping (dictionary). It will in the + # end be rendered to either YAML, JSON, or TOML based on the + # filename extension that are required to be either .yaml, .yml, + # .json, or .toml. 
+ # + # If your content is YAML, JSON, or TOML, it can make sense to + # use data to represent it over stringData as data can be merged + # instead of replaced if set partially from separate Helm + # configuration files. + # + # Both stringData and binaryData should be set to a string + # representing the content, where binaryData should be the + # base64 encoding of the actual file content. + # + data: + myConfig: + myMap: + number: 123 + string: "hi" + myList: + - 1 + - 2 + stringData: | + hello world! + binaryData: aGVsbG8gd29ybGQhCg== + + # mode is by default 0644 and you can optionally override it + # either by octal notation (example: 0400) or decimal notation + # (example: 256). + mode: + ``` + + **Using --set-file** + + To avoid embedding entire files in the Helm chart configuration, you + can use the `--set-file` flag during `helm upgrade` to set the + stringData or binaryData field. + + ```yaml + hub: + extraFiles: + my_image: + mountPath: /usr/local/share/jupyterhub/static/my_image.png + + # Files in /usr/local/etc/jupyterhub/jupyterhub_config.d are + # automatically loaded in alphabetical order of the final file + # name when JupyterHub starts. + my_config: + mountPath: /usr/local/etc/jupyterhub/jupyterhub_config.d/my_jupyterhub_config.py + ``` + + ```bash + # --set-file expects a text based file, so you need to base64 encode + # it manually first. + base64 my_image.png > my_image.png.b64 + + helm upgrade <...> \ + --set-file hub.extraFiles.my_image.binaryData=./my_image.png.b64 \ + --set-file hub.extraFiles.my_config.stringData=./my_jupyterhub_config.py + ``` + + **Common uses** + + 1. **JupyterHub template customization** + + You can replace the default JupyterHub user interface templates in + the hub pod by injecting new ones to + `/usr/local/share/jupyterhub/templates`. These can in turn + reference custom images injected to + `/usr/local/share/jupyterhub/static`. + + 1. 
**JupyterHub standalone file config** + + Instead of embedding JupyterHub python configuration as a string + within a YAML file through + [`hub.extraConfig`](schema_hub.extraConfig), you can inject a + standalone .py file into + `/usr/local/etc/jupyterhub/jupyterhub_config.d` that is + automatically loaded. + + 1. **Flexible configuration** + + By injecting files, you don't have to embed them in a docker image + that you have to rebuild. + + If your configuration file is a YAML/JSON/TOML file, you can also + use `data` instead of `stringData` which allow you to set various + configuration in separate Helm config files. This can be useful to + help dependent charts override only some configuration part of the + file, or to allow for the configuration be set through multiple + Helm configuration files. + + **Limitations** + + 1. File size + + The files in `hub.extraFiles` and `singleuser.extraFiles` are + respectively stored in their own k8s Secret resource. As k8s + Secret's are limited, typically to 1MB, you will be limited to a + total file size of less than 1MB as there is also base64 encoding + that takes place reducing available capacity to 75%. + + 2. File updates + + The files that are mounted are only set during container startup. + This is [because we use + `subPath`](https://kubernetes.io/docs/concepts/storage/volumes/#secret) + as is required to avoid replacing the content of the entire + directory we mount in. + patternProperties: + ".*": + type: object + additionalProperties: false + required: [mountPath] + oneOf: + - required: [data] + - required: [stringData] + - required: [binaryData] + properties: + mountPath: + type: string + data: + type: object + additionalProperties: true + stringData: + type: string + binaryData: + type: string + mode: + type: number + baseUrl: + type: string + description: | + This is the equivalent of c.JupyterHub.base_url, but it is also needed + by the Helm chart in general. 
So, instead of setting + c.JupyterHub.base_url, use this configuration. + command: + type: array + description: | + A list of strings to be used to replace the JupyterHub image's + `ENTRYPOINT` entry. Note that in k8s lingo, the Dockerfile's + `ENTRYPOINT` is called `command`. The list of strings will be expanded + with Helm's template function `tpl` which can render Helm template + logic inside curly braces (`{{... }}`). + + This could be useful to wrap the invocation of JupyterHub itself in + some custom way. + + For more details, see the [Kubernetes + documentation](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/). + args: + type: array + description: | + A list of strings to be used to replace the JupyterHub image's `CMD` + entry as well as the Helm chart's default way to start JupyterHub. + Note that in k8s lingo, the Dockerfile's `CMD` is called `args`. The + list of strings will be expanded with Helm's template function `tpl` + which can render Helm template logic inside curly braces (`{{... }}`). + + ```{warning} + By replacing the entire configuration file, which is mounted to + `/usr/local/etc/jupyterhub/jupyterhub_config.py` by the Helm chart, + instead of appending to it with `hub.extraConfig`, you expose your + deployment for issues stemming from getting out of sync with the Helm + chart's config file. + + These kind of issues will be significantly harder to debug and + diagnose, and can due to this could cause a lot of time expenditure + for both the community maintaining the Helm chart as well as yourself, + even if this wasn't the reason for the issue. + + Due to this, we ask that you do your _absolute best to avoid replacing + the default provided `jupyterhub_config.py` file. It can often be + possible. 
For example, if your goal is to have a dedicated .py file
+ for more extensive additions that you can syntax highlight and such
+ and feel limited by passing code in `hub.extraConfig` which is part of
+ a YAML file, you can use [this
+ trick](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1580#issuecomment-707776237)
+ instead.
+ ```
+
+ ```yaml
+ hub:
+ args:
+ - "jupyterhub"
+ - "--config"
+ - "/usr/local/etc/jupyterhub/jupyterhub_config.py"
+ - "--debug"
+ - "--upgrade-db"
+ ```
+
+ For more details, see the [Kubernetes
+ documentation](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/).
+ cookieSecret:
+ type: [string, "null"]
+ description: |
+ ```{note}
+ As of version 1.0.0 this will automatically be generated and there is
+ no need to set it manually.
+
+ If you wish to reset a generated key, you can use `kubectl edit` on
+ the k8s Secret typically named `hub` and remove the
+ `hub.config.JupyterHub.cookie_secret` entry in the k8s Secret, then
+ perform a new `helm upgrade`.
+ ```
+
+ A 32-byte cryptographically secure randomly generated string used to sign values of
+ secure cookies set by the hub. If unset, jupyterhub will generate one on startup and
+ save it in the file `jupyterhub_cookie_secret` in the `/srv/jupyterhub` directory of
+ the hub container. A value set here will make JupyterHub overwrite any previous file.
+
+ You do not need to set this at all if you are using the default configuration for
+ storing databases - sqlite on a persistent volume (with `hub.db.type` set to the
+ default `sqlite-pvc`). If you are using an external database, then you must set this
+ value explicitly - or your users will keep getting logged out each time the hub pod
+ restarts.
+
+ Changing this value will cause all user logins to be invalidated. 
If this secret leaks, + *immediately* change it to something else, or user data can be compromised + + ```sh + # to generate a value, run + openssl rand -hex 32 + ``` + image: &image-spec + type: object + additionalProperties: false + required: [name, tag] + description: | + Set custom image name, tag, pullPolicy, or pullSecrets for the pod. + properties: + name: + type: string + description: | + The name of the image, without the tag. + + ``` + # example name + gcr.io/my-project/my-image + ``` + tag: + type: string + description: | + The tag of the image to pull. This is the value following `:` in + complete image specifications. + + ``` + # example tags + v1.11.1 + zhy270a + ``` + pullPolicy: + enum: [null, "", IfNotPresent, Always, Never] + description: | + Configures the Pod's `spec.imagePullPolicy`. + + See the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images) + for more info. + pullSecrets: + type: array + description: | + A list of references to existing Kubernetes Secrets with + credentials to pull the image. + + This Pod's final `imagePullSecrets` k8s specification will be a + combination of: + + 1. This list of k8s Secrets, specific for this pod. + 2. The list of k8s Secrets, for use by all pods in the Helm chart, + declared in this Helm charts configuration called + `imagePullSecrets`. + 3. A k8s Secret, for use by all pods in the Helm chart, if + conditionally created from image registry credentials provided + under `imagePullSecret` if `imagePullSecret.create` is set to + true. + + ```yaml + # example - k8s native syntax + pullSecrets: + - name: my-k8s-secret-with-image-registry-credentials + + # example - simplified syntax + pullSecrets: + - my-k8s-secret-with-image-registry-credentials + ``` + networkPolicy: &networkPolicy-spec + type: object + additionalProperties: false + description: | + This configuration regards the creation and configuration of a k8s + _NetworkPolicy resource_. 
+ properties: + enabled: + type: boolean + description: | + Toggle the creation of the NetworkPolicy resource targeting this + pod, and by doing so, restricting its communication to only what + is explicitly allowed in the NetworkPolicy. + ingress: + type: array + description: | + Additional ingress rules to add besides those that are required + for core functionality. + egress: + type: array + description: | + Additional egress rules to add besides those that are required for + core functionality and those added via + [`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules). + + ```{versionchanged} 2.0.0 + The default value changed from providing one very permissive rule + allowing all egress to providing no rule. The permissive rule is + still provided via + [`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules) + set to true though. + ``` + + As an example, below is a configuration that disables the more + broadly permissive `.privateIPs` egress allow rule for the hub + pod, and instead provides tightly scoped permissions to access a + specific k8s local service as identified by pod labels. + + ```yaml + hub: + networkPolicy: + egressAllowRules: + privateIPs: false + egress: + - to: + - podSelector: + matchLabels: + app: my-k8s-local-service + ports: + - protocol: TCP + port: 5978 + ``` + egressAllowRules: + type: object + additionalProperties: false + description: | + This is a set of predefined rules that when enabled will be added + to the NetworkPolicy list of egress rules. + + The resulting egress rules will be a composition of: + - rules specific for the respective pod(s) function within the + Helm chart + - rules based on enabled `egressAllowRules` flags + - rules explicitly specified by the user + + ```{note} + Each flag under this configuration will not render into a + dedicated rule in the NetworkPolicy resource, but instead combine + with the other flags to a reduced set of rules to avoid a + performance penalty. 
+ ``` + + ```{versionadded} 2.0.0 + ``` + properties: + cloudMetadataServer: + type: boolean + description: | + Defaults to `false` for singleuser servers, but to `true` for + all other network policies. + + When enabled this rule allows the respective pod(s) to + establish outbound connections to the cloud metadata server. + + Note that the `nonPrivateIPs` rule is allowing all non Private + IP ranges but makes an exception for the cloud metadata + server, leaving this as the definitive configuration to allow + access to the cloud metadata server. + + ```{versionchanged} 3.0.0 + This configuration is not allowed to be configured true at the + same time as + [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables) + to avoid an ambiguous configuration. + ``` + dnsPortsCloudMetadataServer: + type: boolean + description: | + Defaults to `true` for all network policies. + + When enabled this rule allows the respective pod(s) to + establish outbound connections to the cloud metadata server + via port 53. + + Relying on this rule for the singleuser config should go hand + in hand with disabling + [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables) + to avoid an ambiguous configuration. + + Known situations when this rule can be relevant: + + - In GKE clusters with Cloud DNS that is reached at the + cloud metadata server's non-private IP. + + ```{note} + This chart doesn't know how to identify the DNS server that + pods will rely on due to variations between how k8s clusters + have been setup. Due to that, multiple rules are enabled by + default to ensure DNS connectivity. + ``` + + ```{versionadded} 3.0.0 + ``` + dnsPortsKubeSystemNamespace: + type: boolean + description: | + Defaults to `true` for all network policies. + + When enabled this rule allows the respective pod(s) to + establish outbound connections to pods in the kube-system + namespace via port 53. 
+
+ Known situations when this rule can be relevant:
+
+ - GKE, EKS, AKS, and other clusters relying directly on
+ `kube-dns` or `coredns` pods in the `kube-system` namespace.
+
+ ```{note}
+ This chart doesn't know how to identify the DNS server that
+ pods will rely on due to variations between how k8s clusters
+ have been setup. Due to that, multiple rules are enabled by
+ default to ensure DNS connectivity.
+ ```
+
+ ```{versionadded} 3.0.0
+ ```
+ dnsPortsPrivateIPs:
+ type: boolean
+ description: |
+ Defaults to `true` for all network policies.
+
+ When enabled this rule allows the respective pod(s) to
+ establish outbound connections to private IPs via port 53.
+
+ Known situations when this rule can be relevant:
+
+ - GKE clusters relying on a DNS server indirectly via a node
+ local DNS cache at an unknown private IP.
+
+ ```{note}
+ This chart doesn't know how to identify the DNS server that
+ pods will rely on due to variations between how k8s clusters
+ have been setup. Due to that, multiple rules are enabled by
+ default to ensure DNS connectivity.
+
+ ```{warning}
+ This rule is not expected to work in clusters relying on
+ Cilium to enforce the NetworkPolicy rules (includes GKE
+ clusters with Dataplane v2), this is due to a [known
+ limitation](https://github.com/cilium/cilium/issues/9209).
+ ```
+ nonPrivateIPs:
+ type: boolean
+ description: |
+ Defaults to `true` for all network policies.
+
+ When enabled this rule allows the respective pod(s) to
+ establish outbound connections to the non-private IP ranges
+ with the exception of the cloud metadata server. This means
+ respective pod(s) can establish connections to the internet
+ but not (say) an unsecured prometheus server running in the
+ same cluster.
+ privateIPs:
+ type: boolean
+ description: |
+ Defaults to `false` for singleuser servers, but to `true` for
+ all other network policies.
+
+ Private IPs refer to the IP ranges `10.0.0.0/8`,
+ `172.16.0.0/12`, `192.168.0.0/16`. 
+ + When enabled this rule allows the respective pod(s) to + establish outbound connections to the internal k8s cluster. + This means users can access the internet but not (say) an + unsecured prometheus server running in the same cluster. + + Since not all workloads in the k8s cluster may have + NetworkPolicies setup to restrict their incoming connections, + having this set to false can be a good defense against + malicious intent from someone in control of software in these + pods. + + If possible, try to avoid setting this to true as it gives + broad permissions that could be specified more directly via + the [`.egress`](schema_singleuser.networkPolicy.egress). + + ```{warning} + This rule is not expected to work in clusters relying on + Cilium to enforce the NetworkPolicy rules (includes GKE + clusters with Dataplane v2), this is due to a [known + limitation](https://github.com/cilium/cilium/issues/9209). + ``` + interNamespaceAccessLabels: + enum: [accept, ignore] + description: | + This configuration option determines if both namespaces and pods + in other namespaces, that have specific access labels, should be + accepted to allow ingress (set to `accept`), or, if the labels are + to be ignored when applied outside the local namespace (set to + `ignore`). + + The available access labels for respective NetworkPolicy resources + are: + + - `hub.jupyter.org/network-access-hub: "true"` (hub) + - `hub.jupyter.org/network-access-proxy-http: "true"` (proxy.chp, proxy.traefik) + - `hub.jupyter.org/network-access-proxy-api: "true"` (proxy.chp) + - `hub.jupyter.org/network-access-singleuser: "true"` (singleuser) + allowedIngressPorts: + type: array + description: | + A rule to allow ingress on these ports will be added no matter + what the origin of the request is. The default setting for + `proxy.chp` and `proxy.traefik`'s networkPolicy configuration is + `[http, https]`, while it is `[]` for other networkPolicies. 
+ + Note that these port names or numbers target a Pod's port name or + number, not a k8s Service's port name or number. + db: + type: object + additionalProperties: false + properties: + type: + enum: [sqlite-pvc, sqlite-memory, mysql, postgres, other] + description: | + Type of database backend to use for the hub database. + + The Hub requires a persistent database to function, and this lets you specify + where it should be stored. + + The various options are: + + 1. **sqlite-pvc** + + Use an `sqlite` database kept on a persistent volume attached to the hub. + + By default, this disk is created by the cloud provider using + *dynamic provisioning* configured by a [storage + class](https://kubernetes.io/docs/concepts/storage/storage-classes/). + You can customize how this disk is created / attached by + setting various properties under `hub.db.pvc`. + + This is the default setting, and should work well for most cloud provider + deployments. + + 2. **sqlite-memory** + + Use an in-memory `sqlite` database. This should only be used for testing, + since the database is erased whenever the hub pod restarts - causing the hub + to lose all memory of users who had logged in before. + + When using this for testing, make sure you delete all other objects that the + hub has created (such as user pods, user PVCs, etc) every time the hub restarts. + Otherwise you might run into errors about duplicate resources. + + 3. **mysql** + + Use an externally hosted mysql database. + + You have to specify an sqlalchemy connection string for the mysql database you + want to connect to in `hub.db.url` if using this option. + + The general format of the connection string is: + ``` + mysql+pymysql://:@:/ + ``` + + The user specified in the connection string must have the rights to create + tables in the database specified. + + 4. **postgres** + + Use an externally hosted postgres database. 
+ + You have to specify an sqlalchemy connection string for the postgres database you + want to connect to in `hub.db.url` if using this option. + + The general format of the connection string is: + ``` + postgresql+psycopg2://:@:/ + ``` + + The user specified in the connection string must have the rights to create + tables in the database specified. + + 5. **other** + + Use an externally hosted database of some kind other than mysql + or postgres. + + When using _other_, the database password must be passed as + part of [hub.db.url](schema_hub.db.url) as + [hub.db.password](schema_hub.db.password) will be ignored. + pvc: + type: object + additionalProperties: false + required: [storage] + description: | + Customize the Persistent Volume Claim used when `hub.db.type` is `sqlite-pvc`. + properties: + annotations: + type: object + additionalProperties: false + patternProperties: &labels-and-annotations-patternProperties + ".*": + type: string + description: | + Annotations to apply to the PVC containing the sqlite database. + + See [the Kubernetes + documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + for more details about annotations. + selector: + type: object + additionalProperties: true + description: | + Label selectors to set for the PVC containing the sqlite database. + + Useful when you are using a specific PV, and want to bind to + that and only that. + + See [the Kubernetes + documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) + for more details about using a label selector for what PV to + bind to. + storage: + type: string + description: | + Size of disk to request for the database disk. + accessModes: + type: array + items: + type: [string, "null"] + description: | + AccessModes contains the desired access modes the volume + should have. 
See [the k8s + documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1) + for more information. + storageClassName: + type: [string, "null"] + description: | + Name of the StorageClass required by the claim. + + If this is a blank string it will be set to a blank string, + while if it is null, it will not be set at all. + subPath: + type: [string, "null"] + description: | + Path within the volume from which the container's volume + should be mounted. Defaults to "" (volume's root). + upgrade: + type: [boolean, "null"] + description: | + Users with external databases need to opt-in for upgrades of the + JupyterHub specific database schema if needed as part of a + JupyterHub version upgrade. + url: + type: [string, "null"] + description: | + Connection string when `hub.db.type` is mysql or postgres. + + See documentation for `hub.db.type` for more details on the format of this property. + password: + type: [string, "null"] + description: | + Password for the database when `hub.db.type` is mysql or postgres. + labels: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + Extra labels to add to the hub pod. + + See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) + to learn more about labels. + initContainers: + type: array + description: | + list of initContainers to be run with hub pod. See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) + + ```yaml + hub: + initContainers: + - name: init-myservice + image: busybox:1.28 + command: ['sh', '-c', 'command1'] + - name: init-mydb + image: busybox:1.28 + command: ['sh', '-c', 'command2'] + ``` + extraEnv: + type: [object, array] + additionalProperties: true + description: | + Extra environment variables that should be set for the hub pod. 
+ + Environment variables are usually used to: + - Pass parameters to some custom code in `hub.extraConfig`. + - Configure code running in the hub pod, such as an authenticator or + spawner. + + String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which + is a part of Kubernetes. + + ```yaml + hub: + extraEnv: + # basic notation (for literal values only) + MY_ENV_VARS_NAME1: "my env var value 1" + + # explicit notation (the "name" field takes precedence) + HUB_NAMESPACE: + name: HUB_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + + # implicit notation (the "name" field is implied) + PREFIXED_HUB_NAMESPACE: + value: "my-prefix-$(HUB_NAMESPACE)" + SECRET_VALUE: + valueFrom: + secretKeyRef: + name: my-k8s-secret + key: password + ``` + + For more information, see the [Kubernetes EnvVar + specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core). + extraConfig: + type: object + additionalProperties: true + description: | + Arbitrary extra python based configuration that should be in `jupyterhub_config.py`. + + This is the *escape hatch* - if you want to configure JupyterHub to do something specific + that is not present here as an option, you can write the raw Python to do it here. + + extraConfig is a *dict*, so there can be multiple configuration + snippets under different names. The configuration sections are run in + alphabetical order based on the keys. + + Non-exhaustive examples of things you can do here: + - Subclass authenticator / spawner to do a custom thing + - Dynamically launch different images for different sets of images + - Inject an auth token from GitHub authenticator into user pod + - Anything else you can think of! + + Since this is usually a multi-line string, you want to format it using YAML's + [| operator](https://yaml.org/spec/1.2.2/#23-scalars). 
+ + For example: + + ```yaml + hub: + extraConfig: + myConfig.py: | + c.JupyterHub.something = 'something' + c.Spawner.something_else = 'something else' + ``` + + ```{note} + No code validation is performed until JupyterHub loads it! If you make + a typo here, it will probably manifest itself as the hub pod failing + to start up and instead entering an `Error` state or the subsequent + `CrashLoopBackoff` state. + + To make use of your own programs linters etc, it would be useful to + not embed Python code inside a YAML file. To do that, consider using + [`hub.extraFiles`](schema_hub.extraFiles) and mounting a file to + `/usr/local/etc/jupyterhub/jupyterhub_config.d` in order to load your + extra configuration logic. + ``` + + fsGid: + type: [integer, "null"] + minimum: 0 + # This schema entry is needed to help us print a more helpful error + # message in NOTES.txt if hub.fsGid is set. + # + description: | + ```{note} + Removed in version 2.0.0. Use + [`hub.podSecurityContext`](schema_hub.podSecurityContext) and specify + `fsGroup` instead. + ``` + service: + type: object + additionalProperties: false + description: | + Object to configure the service the JupyterHub will be exposed on by the Kubernetes server. + properties: + type: + enum: [ClusterIP, NodePort, LoadBalancer, ExternalName] + description: | + The Kubernetes ServiceType to be used. + + The default type is `ClusterIP`. + See the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) + to learn more about service types. + ports: + type: object + additionalProperties: false + description: | + Object to configure the ports the hub service will be deployed on. + properties: + nodePort: + type: [integer, "null"] + minimum: 0 + description: | + The nodePort to deploy the hub service on. 
+ annotations: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + Kubernetes annotations to apply to the hub service. + extraPorts: + type: array + description: | + Extra ports to add to the Hub Service object besides `hub` / `8081`. + This should be an array that includes `name`, `port`, and `targetPort`. + See [Multi-port Services](https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services) for more details. + loadBalancerIP: + type: [string, "null"] + description: | + A public IP address the hub Kubernetes service should be exposed + on. To expose the hub directly is not recommended. Instead route + traffic through the proxy-public service towards the hub. + + pdb: &pdb-spec + type: object + additionalProperties: false + description: | + Configure a PodDisruptionBudget for this Deployment. + + These are disabled by default for our deployments that don't support + being run in parallel with multiple replicas. Only the user-scheduler + currently supports being run in parallel with multiple replicas. If + they are enabled for a Deployment with only one replica, they will + block `kubectl drain` of a node for example. + + Note that if you aim to block scaling down a node with the + hub/proxy/autohttps pod that would cause disruptions of the + deployment, then you should instead annotate the pods of the + Deployment [as described + here](https://github.com/kubernetes/autoscaler/blob/HEAD/cluster-autoscaler/FAQ.md#what-types-of-pods-can-prevent-ca-from-removing-a-node). + + "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" + + See [the Kubernetes + documentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) + for more details about disruptions. + properties: + enabled: + type: boolean + description: | + Decides if a PodDisruptionBudget is created targeting the + Deployment's pods. 
+ maxUnavailable: + type: [integer, "null"] + description: | + The maximum number of pods that can be unavailable during + voluntary disruptions. + minAvailable: + type: [integer, "null"] + description: | + The minimum number of pods required to be available during + voluntary disruptions. + existingSecret: + type: [string, "null"] + description: | + This option allow you to provide the name of an existing k8s Secret to + use alongside of the chart managed k8s Secret. The content of this k8s + Secret will be merged with the chart managed k8s Secret, giving + priority to the self-managed k8s Secret. + + ```{warning} + 1. The self managed k8s Secret must mirror the structure in the chart + managed secret. + 2. [`proxy.secretToken`](schema_proxy.secretToken) (aka. + `hub.config.ConfigurableHTTPProxy.auth_token`) is only read from + the chart managed k8s Secret. + ``` + nodeSelector: &nodeSelector-spec + type: object + additionalProperties: true + description: | + An object with key value pairs representing labels. K8s Nodes are + required to have match all these labels for this Pod to scheduled on + them. + + ```yaml + disktype: ssd + nodetype: awesome + ``` + + See [the Kubernetes + documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) + for more details. + tolerations: &tolerations-spec + type: array + description: | + Tolerations allow a pod to be scheduled on nodes with taints. These + tolerations are additional tolerations to the tolerations common to + all pods of a their respective kind + ([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations), + [scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)). + + Pass this field an array of + [`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core) + objects. + + See the [Kubernetes + docs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) + for more info. 
+ activeServerLimit: + type: [integer, "null"] + description: &jupyterhub-native-config-description | + JupyterHub native configuration, see the [JupyterHub + documentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html) + for more information. + allowNamedServers: + type: [boolean, "null"] + description: *jupyterhub-native-config-description + annotations: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + K8s annotations for the hub pod. + authenticatePrometheus: + type: [boolean, "null"] + description: *jupyterhub-native-config-description + concurrentSpawnLimit: + type: [integer, "null"] + description: *jupyterhub-native-config-description + consecutiveFailureLimit: + type: [integer, "null"] + description: *jupyterhub-native-config-description + podSecurityContext: &podSecurityContext-spec + additionalProperties: true + description: | + A k8s native specification of the pod's security context, see [the + documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podsecuritycontext-v1-core) + for details. + containerSecurityContext: &containerSecurityContext-spec + type: object + additionalProperties: true + description: | + A k8s native specification of the container's security context, see [the + documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core) + for details. + deploymentStrategy: + type: object + additionalProperties: false + properties: + rollingUpdate: + type: [string, "null"] + type: + type: [string, "null"] + description: | + JupyterHub does not support running in parallel, due to this we + default to using a deployment strategy of Recreate. + extraContainers: &extraContainers-spec + type: array + description: | + Additional containers for the Pod. Use a k8s native syntax. 
+ extraVolumeMounts: &extraVolumeMounts-spec + type: array + description: | + Additional volume mounts for the Container. Use a k8s native syntax. + extraVolumes: &extraVolumes-spec + type: array + description: | + Additional volumes for the Pod. Use a k8s native syntax. + livenessProbe: &probe-spec + type: object + additionalProperties: true + required: [enabled] + if: + properties: + enabled: + const: true + then: + description: | + This config option is like the k8s native specification of a + container probe, except that it also supports an `enabled` boolean + flag. + + See [the k8s + documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#probe-v1-core) + for more details. + readinessProbe: *probe-spec + namedServerLimitPerUser: + type: [integer, "null"] + description: *jupyterhub-native-config-description + redirectToServer: + type: [boolean, "null"] + description: *jupyterhub-native-config-description + resources: &resources-spec + type: object + additionalProperties: true + description: | + A k8s native specification of resources, see [the + documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core). + lifecycle: &lifecycle-spec + type: object + additionalProperties: false + description: | + A k8s native specification of lifecycle hooks on the container, see [the + documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#lifecycle-v1-core). + properties: + postStart: + type: object + additionalProperties: true + preStop: + type: object + additionalProperties: true + services: + type: object + additionalProperties: true + description: | + This is where you register JupyterHub services. For details on how to + configure these services in this Helm chart just keep reading but for + details on services themselves instead read [JupyterHub's + documentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/service.html). 
+ + ```{note} + Only a selection of JupyterHub's configuration options that can be + configured for a service are documented below. All configuration set + here will be applied even if this Helm chart doesn't recognize it. + ``` + + JupyterHub's native configuration accepts a list of service objects, + this Helm chart only accept a dictionary where each key represents the + name of a service and the value is the actual service objects. + + When configuring JupyterHub services via this Helm chart, the `name` + field can be omitted as it can be implied by the dictionary key. + Further, the `api_token` field can be omitted as it will be + automatically generated as of version 1.1.0 of this Helm chart. + + If you have an external service that needs to access the automatically + generated api_token for the service, you can access it from the `hub` + k8s Secret part of this Helm chart under the key + `hub.services.my-service-config-key.apiToken`. + + Here is an example configuration of two services where the first + explicitly sets a name and api_token, while the second omits those and + lets the name be implied from the key name and the api_token be + automatically generated. + + ```yaml + hub: + services: + my-service-1: + admin: true + name: my-explicitly-set-service-name + api_token: my-explicitly-set-api_token + + # the name of the following service will be my-service-2 + # the api_token of the following service will be generated + my-service-2: {} + ``` + + If you develop a Helm chart depending on the JupyterHub Helm chart and + want to let some Pod's environment variable be populated with the + api_token of a service registered like above, then do something along + these lines. + + ```yaml + # ... container specification of a pod ... + env: + - name: MY_SERVICE_1_API_TOKEN + valueFrom: + secretKeyRef: + # Don't hardcode the name, use the globally accessible + # named templates part of the JupyterHub Helm chart. + name: {{ include "jupyterhub.hub.fullname" . 
}} + # Note below the use of the configuration key my-service-1 + # rather than the explicitly set service name. + key: hub.services.my-service-1.apiToken + ``` + properties: + name: + type: string + description: | + The name can be implied via the key name under which this + service is configured, and is due to that allowed to be + omitted in this Helm chart configuration of JupyterHub. + admin: + type: boolean + command: + type: [string, array] + url: + type: string + api_token: + type: [string, "null"] + description: | + The api_token will be automatically generated if not + explicitly set. It will also be exposed in via a k8s Secret + part of this Helm chart under a specific key. + + See the documentation under + [`hub.services`](schema_hub.services) for details about this. + apiToken: + type: [string, "null"] + description: | + An alias for api_token provided for backward compatibility by + the JupyterHub Helm chart that will be transformed to + api_token. + loadRoles: + type: object + additionalProperties: true + description: | + This is where you should define JupyterHub roles and apply them to + JupyterHub users, groups, and services to grant them additional + permissions as defined in JupyterHub's RBAC system. + + Complement this documentation with [JupyterHub's + documentation](https://jupyterhub.readthedocs.io/en/stable/rbac/roles.html#defining-roles) + about `load_roles`. + + Note that while JupyterHub's native configuration `load_roles` accepts + a list of role objects, this Helm chart only accepts a dictionary where + each key represents the name of a role and the value is the actual + role object. + + ```yaml + hub: + loadRoles: + teacher: + description: Access to users' information and group membership + + # this role provides permissions to... + scopes: [users, groups] + + # this role will be assigned to... 
+ users: [erik] + services: [grading-service] + groups: [teachers] + ``` + + When configuring JupyterHub roles via this Helm chart, the `name` + field can be omitted as it can be implied by the dictionary key. + shutdownOnLogout: + type: [boolean, "null"] + description: *jupyterhub-native-config-description + templatePaths: + type: array + description: *jupyterhub-native-config-description + templateVars: + type: object + additionalProperties: true + description: *jupyterhub-native-config-description + serviceAccount: &serviceAccount + type: object + required: [create] + additionalProperties: false + description: | + Configuration for a k8s ServiceAccount dedicated for use by the + specific pod which this configuration is nested under. + properties: + create: + type: boolean + description: | + Whether or not to create the `ServiceAccount` resource. + name: + type: ["string", "null"] + description: | + This configuration serves multiple purposes: + + - It will be the `serviceAccountName` referenced by related Pods. + - If `create` is set, the created ServiceAccount resource will be named like this. + - If [`rbac.create`](schema_rbac.create) is set, the associated (Cluster)RoleBindings will bind to this name. + + If not explicitly provided, a default name will be used. + annotations: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + Kubernetes annotations to apply to the k8s ServiceAccount. + extraPodSpec: &extraPodSpec-spec + type: object + additionalProperties: true + description: | + Arbitrary extra k8s pod specification as a YAML object. The default + value of this setting is an empty object, i.e. no extra configuration. + The value of this property is augmented to the pod specification as-is. + + This is a powerful tool for expert k8s administrators with advanced + configuration requirements. 
This setting should only be used for + configuration that cannot be accomplished through the other settings. + Misusing this setting can break your deployment and/or compromise + your system security. + + This is one of four related settings for inserting arbitrary pod + specification: + + 1. hub.extraPodSpec + 2. proxy.chp.extraPodSpec + 3. proxy.traefik.extraPodSpec + 4. scheduling.userScheduler.extraPodSpec + + One real-world use of these settings is to enable host networking. For + example, to configure host networking for the hub pod, add the + following to your helm configuration values: + + ```yaml + hub: + extraPodSpec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + ``` + + Likewise, to configure host networking for the proxy pod, add the + following: + + ```yaml + proxy: + chp: + extraPodSpec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + ``` + + N.B. Host networking has special security implications and can easily + break your deployment. This is an example—not an endorsement. + + See [PodSpec](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec) + for the latest pod resource specification. + + proxy: + type: object + additionalProperties: false + properties: + chp: + type: object + additionalProperties: false + description: | + Configure the configurable-http-proxy (chp) pod managed by jupyterhub to route traffic + both to itself and to user pods. + properties: + revisionHistoryLimit: *revisionHistoryLimit + networkPolicy: *networkPolicy-spec + extraCommandLineFlags: + type: array + description: | + A list of strings to be added as command line options when + starting + [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy#command-line-options) + that will be expanded with Helm's template function `tpl` which + can render Helm template logic inside curly braces (`{{ ... }}`). 
+ + ```yaml + proxy: + chp: + extraCommandLineFlags: + - "--auto-rewrite" + - "--custom-header={{ .Values.custom.myStuff }}" + ``` + + Note that these will be appended last, and if you provide the same + flag twice, the last flag will be used, which mean you can + override the default flag values as well. + extraEnv: + type: [object, array] + additionalProperties: true + description: | + Extra environment variables that should be set for the chp pod. + + Environment variables are usually used here to: + - override HUB_SERVICE_PORT or HUB_SERVICE_HOST default values + - set CONFIGPROXY_SSL_KEY_PASSPHRASE for setting passphrase of SSL keys + + String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which + is a part of Kubernetes. + + ```yaml + proxy: + chp: + extraEnv: + # basic notation (for literal values only) + MY_ENV_VARS_NAME1: "my env var value 1" + + # explicit notation (the "name" field takes precedence) + CHP_NAMESPACE: + name: CHP_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + + # implicit notation (the "name" field is implied) + PREFIXED_CHP_NAMESPACE: + value: "my-prefix-$(CHP_NAMESPACE)" + SECRET_VALUE: + valueFrom: + secretKeyRef: + name: my-k8s-secret + key: password + ``` + + For more information, see the [Kubernetes EnvVar + specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core). + pdb: *pdb-spec + nodeSelector: *nodeSelector-spec + tolerations: *tolerations-spec + containerSecurityContext: *containerSecurityContext-spec + image: *image-spec + livenessProbe: *probe-spec + readinessProbe: *probe-spec + resources: *resources-spec + defaultTarget: + type: [string, "null"] + description: | + Override the URL for the default routing target for the proxy. + Defaults to JupyterHub itself. + This will generally only have an effect while JupyterHub is not running, + as JupyterHub adds itself as the default target after it starts. 
+ errorTarget: + type: [string, "null"] + description: | + Override the URL for the error target for the proxy. + Defaults to JupyterHub itself. + Useful to reduce load on the Hub + or produce more informative error messages than the Hub's default, + e.g. in highly customized deployments such as BinderHub. + See Configurable HTTP Proxy for details on implementing an error target. + extraPodSpec: *extraPodSpec-spec + secretToken: + type: [string, "null"] + description: | + ```{note} + As of version 1.0.0 this will automatically be generated and there is + no need to set it manually. + + If you wish to reset a generated key, you can use `kubectl edit` on + the k8s Secret typically named `hub` and remove the + `hub.config.ConfigurableHTTPProxy.auth_token` entry in the k8s Secret, + then perform a new `helm upgrade`. + ``` + + A 32-byte cryptographically secure randomly generated string used to + secure communications between the hub pod and the proxy pod running a + [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy) + instance. + + ```sh + # to generate a value, run + openssl rand -hex 32 + ``` + + Changing this value will cause the proxy and hub pods to restart. It is good security + practice to rotate these values over time. If this secret leaks, *immediately* change + it to something else, or user data can be compromised. + service: + type: object + additionalProperties: false + description: | + Configuration of the k8s Service `proxy-public` which either will + point to the `autohttps` pod running Traefik for TLS termination, or + the `proxy` pod running ConfigurableHTTPProxy. Incoming traffic from + users on the internet should always go through this k8s Service. + + When this service targets the `autohttps` pod which then routes to the + `proxy` pod, a k8s Service named `proxy-http` will be added targeting + the `proxy` pod and only accepting HTTP traffic on port 80. 
+ properties: + type: + enum: [ClusterIP, NodePort, LoadBalancer, ExternalName] + description: | + Default `LoadBalancer`. + See the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) + to learn more about service types. + labels: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + Extra labels to add to the proxy service. + + See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) + to learn more about labels. + annotations: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + Annotations to apply to the service that is exposing the proxy. + + See [the Kubernetes + documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + for more details about annotations. + nodePorts: + type: object + additionalProperties: false + description: | + Object to set NodePorts to expose the service on for http and https. + + See [the Kubernetes + documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) + for more details about NodePorts. + properties: + http: + type: [integer, "null"] + description: | + The HTTP port the proxy-public service should be exposed on. + https: + type: [integer, "null"] + description: | + The HTTPS port the proxy-public service should be exposed on. + disableHttpPort: + type: boolean + description: | + Default `false`. + + If `true`, port 80 for incoming HTTP traffic will no longer be exposed. This should not be used with `proxy.https.type=letsencrypt` or `proxy.https.enabled=false` as it would remove the only exposed port. + extraPorts: + type: array + description: | + Extra ports the k8s Service should accept incoming traffic on, + which will be redirected to either the `autohttps` pod (treafik) + or the `proxy` pod (chp). 
+ + See [the Kubernetes + documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#serviceport-v1-core) + for the structure of the items in this list. + loadBalancerIP: + type: [string, "null"] + description: | + The public IP address the proxy-public Kubernetes service should + be exposed on. This entry will end up at the configurable proxy + server that JupyterHub manages, which will direct traffic to user + pods at the `/user` path and the hub pod at the `/hub` path. + + Set this if you want to use a fixed external IP address instead of + a dynamically acquired one. This is relevant if you have a domain + name that you want to point to a specific IP and want to ensure it + doesn't change. + loadBalancerSourceRanges: + type: array + description: | + A list of IP CIDR ranges that are allowed to access the load balancer service. + Defaults to allowing everyone to access it. + https: + type: object + additionalProperties: false + description: | + Object for customizing the settings for HTTPS used by the JupyterHub's proxy. + For more information on configuring HTTPS for your JupyterHub, see the [HTTPS section in our security guide](https) + properties: + enabled: + type: [boolean, "null"] + description: | + Indicator to set whether HTTPS should be enabled or not on the proxy. Defaults to `true` if the https object is provided. + type: + enum: [null, "", letsencrypt, manual, offload, secret] + description: | + The type of HTTPS encryption that is used. + Decides on which ports and network policies are used for communication via HTTPS. Setting this to `secret` sets the type to manual HTTPS with a secret that has to be provided in the `https.secret` object. + Defaults to `letsencrypt`. + letsencrypt: + type: object + additionalProperties: false + properties: + contactEmail: + type: [string, "null"] + description: | + The contact email to be used for automatically provisioned HTTPS certificates by Let's Encrypt. 
For more information see [Set up automatic HTTPS](setup-automatic-https). + Required for automatic HTTPS. + acmeServer: + type: [string, "null"] + description: | + Let's Encrypt is one of various ACME servers that can provide + a certificate, and by default their production server is used. + + Let's Encrypt staging: https://acme-staging-v02.api.letsencrypt.org/directory + Let's Encrypt production: acmeServer: https://acme-v02.api.letsencrypt.org/directory + manual: + type: object + additionalProperties: false + description: | + Object for providing own certificates for manual HTTPS configuration. To be provided when setting `https.type` to `manual`. + See [Set up manual HTTPS](setup-manual-https) + properties: + key: + type: [string, "null"] + description: | + The RSA private key to be used for HTTPS. + To be provided in the form of + + ``` + key: | + -----BEGIN RSA PRIVATE KEY----- + ... + -----END RSA PRIVATE KEY----- + ``` + cert: + type: [string, "null"] + description: | + The certificate to be used for HTTPS. + To be provided in the form of + + ``` + cert: | + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- + ``` + secret: + type: object + additionalProperties: false + description: | + Secret to be provided when setting `https.type` to `secret`. + properties: + name: + type: [string, "null"] + description: | + Name of the secret + key: + type: [string, "null"] + description: | + Path to the private key to be used for HTTPS. + Example: `'tls.key'` + crt: + type: [string, "null"] + description: | + Path to the certificate to be used for HTTPS. + Example: `'tls.crt'` + hosts: + type: array + description: | + You domain in list form. + Required for automatic HTTPS. See [Set up automatic HTTPS](setup-automatic-https). 
+ To be provided like: + ``` + hosts: + - + ``` + traefik: + type: object + additionalProperties: false + description: | + Configure the traefik proxy used to terminate TLS when 'autohttps' is enabled + properties: + revisionHistoryLimit: *revisionHistoryLimit + labels: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + Extra labels to add to the traefik pod. + + See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) + to learn more about labels. + networkPolicy: *networkPolicy-spec + extraInitContainers: + type: array + description: | + list of extraInitContainers to be run with traefik pod, after the containers set in the chart. See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) + + ```yaml + proxy: + traefik: + extraInitContainers: + - name: init-myservice + image: busybox:1.28 + command: ['sh', '-c', 'command1'] + - name: init-mydb + image: busybox:1.28 + command: ['sh', '-c', 'command2'] + ``` + extraEnv: + type: [object, array] + additionalProperties: true + description: | + Extra environment variables that should be set for the traefik pod. + + Environment Variables here may be used to configure traefik. + + String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which + is a part of Kubernetes. 
+ + ```yaml + proxy: + traefik: + extraEnv: + # basic notation (for literal values only) + MY_ENV_VARS_NAME1: "my env var value 1" + + # explicit notation (the "name" field takes precedence) + TRAEFIK_NAMESPACE: + name: TRAEFIK_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + + # implicit notation (the "name" field is implied) + PREFIXED_TRAEFIK_NAMESPACE: + value: "my-prefix-$(TRAEFIK_NAMESPACE)" + SECRET_VALUE: + valueFrom: + secretKeyRef: + name: my-k8s-secret + key: password + ``` + + For more information, see the [Kubernetes EnvVar + specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core). + pdb: *pdb-spec + nodeSelector: *nodeSelector-spec + tolerations: *tolerations-spec + containerSecurityContext: *containerSecurityContext-spec + extraDynamicConfig: + type: object + additionalProperties: true + description: | + This refers to traefik's post-startup configuration. + + This Helm chart already provide such configuration, so this is a + place where you can merge in additional configuration. If you are + about to use this configuration, you may want to inspect the + default configuration declared + [here](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/templates/proxy/autohttps/_configmap-dynamic.yaml). + extraPorts: + type: array + description: | + Extra ports for the traefik container within the autohttps pod + that you would like to expose, formatted in a k8s native way. + extraStaticConfig: + type: object + additionalProperties: true + description: | + This refers to traefik's startup configuration. + + This Helm chart already provide such configuration, so this is a + place where you can merge in additional configuration. If you are + about to use this configuration, you may want to inspect the + default configuration declared + [here](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/templates/proxy/autohttps/_configmap-traefik.yaml). 
+ extraVolumes: *extraVolumes-spec + extraVolumeMounts: *extraVolumeMounts-spec + hsts: + type: object + additionalProperties: false + required: [includeSubdomains, maxAge, preload] + description: | + This section regards a HTTP Strict-Transport-Security (HSTS) + response header. It can act as a request for a visiting web + browsers to enforce HTTPS on their end in for a given time into + the future, and optionally also for future requests to subdomains. + + These settings relate to traefik configuration which we use as a + TLS termination proxy. + + See [Mozilla's + documentation](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) + for more information. + properties: + includeSubdomains: + type: boolean + maxAge: + type: integer + preload: + type: boolean + image: *image-spec + resources: *resources-spec + serviceAccount: *serviceAccount + extraPodSpec: *extraPodSpec-spec + labels: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + K8s labels for the proxy pod. + + ```{note} + For consistency, this should really be located under + proxy.chp.labels but isn't for historical reasons. + ``` + annotations: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + K8s annotations for the proxy pod. + + ```{note} + For consistency, this should really be located under + proxy.chp.annotations but isn't for historical reasons. + ``` + deploymentStrategy: + type: object + additionalProperties: false + properties: + rollingUpdate: + type: [string, "null"] + type: + type: [string, "null"] + description: | + While the proxy pod running + [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy) + could run in parallel, two instances running in parallel wouldn't + both receive updates from JupyterHub regarding how it should route + traffic. 
Due to this we default to using a deployment strategy of + Recreate instead of RollingUpdate. + secretSync: + type: object + additionalProperties: false + description: | + This configuration section refers to configuration of the sidecar + container in the autohttps pod running next to its traefik container + responsible for TLS termination. + + The purpose of this container is to store away and load TLS + certificates from a k8s Secret. The TLS certificates are acquired by + the ACME client (LEGO) that is running within the traefik container, + where traefik is using them for TLS termination. + properties: + containerSecurityContext: *containerSecurityContext-spec + image: *image-spec + resources: *resources-spec + + singleuser: + type: object + additionalProperties: false + description: | + Options for customizing the environment that is provided to the users after they log in. + properties: + networkPolicy: *networkPolicy-spec + podNameTemplate: + type: [string, "null"] + description: | + Passthrough configuration for + [KubeSpawner.pod_name_template](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.pod_name_template). + cpu: + type: object + additionalProperties: false + description: | + Set CPU limits & guarantees that are enforced for each user. + + See the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) + for more info. + properties: + limit: + type: [number, "null"] + guarantee: + type: [number, "null"] + memory: + type: object + additionalProperties: false + description: | + Set Memory limits & guarantees that are enforced for each user. + + See the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) + for more info. + properties: + limit: + type: [number, string, "null"] + guarantee: + type: [number, string, "null"] + description: | + Note that this field is referred to as *requests* by the Kubernetes API. 
+ image: *image-spec + initContainers: + type: array + description: | + list of initContainers to be run every singleuser pod. See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) + + ```yaml + singleuser: + initContainers: + - name: init-myservice + image: busybox:1.28 + command: ['sh', '-c', 'command1'] + - name: init-mydb + image: busybox:1.28 + command: ['sh', '-c', 'command2'] + ``` + profileList: + type: array + description: | + For more information about the profile list, see [KubeSpawner's + documentation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner) + as this is simply a passthrough to that configuration. + + ```{note} + The image-pullers are aware of the overrides of images in + `singleuser.profileList` but they won't be if you configure it in + JupyterHub's configuration of '`c.KubeSpawner.profile_list`. + ``` + + ```yaml + singleuser: + profileList: + - display_name: "Default: Shared, 8 CPU cores" + description: "Your code will run on a shared machine with CPU only." + default: True + - display_name: "Personal, 4 CPU cores & 26GB RAM, 1 NVIDIA Tesla K80 GPU" + description: "Your code will run a personal machine with a GPU." + kubespawner_override: + extra_resource_limits: + nvidia.com/gpu: "1" + ``` + extraFiles: *extraFiles + extraEnv: + type: [object, array] + additionalProperties: true + description: | + Extra environment variables that should be set for the user pods. + + String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which + is a part of Kubernetes. Note that the user pods will already have + access to a set of environment variables that you can use, like + `JUPYTERHUB_USER` and `JUPYTERHUB_HOST`. For more information about these + inspect [this source + code](https://github.com/jupyterhub/jupyterhub/blob/cc8e7806530466dce8968567d1bbd2b39a7afa26/jupyterhub/spawner.py#L763). 
+ + ```yaml + singleuser: + extraEnv: + # basic notation (for literal values only) + MY_ENV_VARS_NAME1: "my env var value 1" + + # explicit notation (the "name" field takes precedence) + USER_NAMESPACE: + name: USER_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + + # implicit notation (the "name" field is implied) + PREFIXED_USER_NAMESPACE: + value: "my-prefix-$(USER_NAMESPACE)" + SECRET_VALUE: + valueFrom: + secretKeyRef: + name: my-k8s-secret + key: password + ``` + + For more information, see the [Kubernetes EnvVar + specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core). + nodeSelector: *nodeSelector-spec + extraTolerations: *tolerations-spec + extraNodeAffinity: + type: object + additionalProperties: false + description: | + Affinities describe where pods prefer or require to be scheduled, they + may prefer or require a node where they are to be scheduled to have a + certain label (node affinity). They may also require to be scheduled + in proximity or with a lack of proximity to another pod (pod affinity + and anti pod affinity). + + See the [Kubernetes + docs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) + for more info. + properties: + required: + type: array + description: | + Pass this field an array of + [`NodeSelectorTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#nodeselectorterm-v1-core) + objects. + preferred: + type: array + description: | + Pass this field an array of + [`PreferredSchedulingTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#preferredschedulingterm-v1-core) + objects. + extraPodAffinity: + type: object + additionalProperties: false + description: | + See the description of `singleuser.extraNodeAffinity`. 
+ properties: + required: + type: array + description: | + Pass this field an array of + [`PodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podaffinityterm-v1-core) + objects. + preferred: + type: array + description: | + Pass this field an array of + [`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#weightedpodaffinityterm-v1-core) + objects. + extraPodAntiAffinity: + type: object + additionalProperties: false + description: | + See the description of `singleuser.extraNodeAffinity`. + properties: + required: + type: array + description: | + Pass this field an array of + [`PodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podaffinityterm-v1-core) + objects. + preferred: + type: array + description: | + Pass this field an array of + [`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#weightedpodaffinityterm-v1-core) + objects. + cloudMetadata: + type: object + additionalProperties: false + required: [blockWithIptables, ip] + description: | + Please refer to dedicated section in [the Helm chart + documentation](block-metadata-iptables) for more information about + this. + properties: + blockWithIptables: + type: boolean + ip: + type: string + + cmd: + type: [array, string, "null"] + description: | + Passthrough configuration for + [KubeSpawner.cmd](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.cmd). + The default is "jupyterhub-singleuser". + Use `cmd: null` to launch a custom CMD from the image, + which must launch jupyterhub-singleuser or an equivalent process eventually. + For example: Jupyter's docker-stacks images. + defaultUrl: + type: [string, "null"] + description: | + Passthrough configuration for + [KubeSpawner.default_url](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.default_url). 
+ # FIXME: name mismatch, named events_enabled in kubespawner + events: + type: [boolean, "null"] + description: | + Passthrough configuration for + [KubeSpawner.events_enabled](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.events_enabled). + extraAnnotations: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + Passthrough configuration for + [KubeSpawner.extra_annotations](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_annotations). + extraContainers: + type: array + description: | + Passthrough configuration for + [KubeSpawner.extra_containers](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_containers). + extraLabels: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + Passthrough configuration for + [KubeSpawner.extra_labels](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_labels). + extraPodConfig: + type: object + additionalProperties: true + description: | + Passthrough configuration for + [KubeSpawner.extra_pod_config](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_pod_config). + extraResource: + type: object + additionalProperties: false + properties: + # FIXME: name mismatch, named extra_resource_guarantees in kubespawner + guarantees: + type: object + additionalProperties: true + description: | + Passthrough configuration for + [KubeSpawner.extra_resource_guarantees](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_resource_guarantees). 
+ # FIXME: name mismatch, named extra_resource_limits in kubespawner + limits: + type: object + additionalProperties: true + description: | + Passthrough configuration for + [KubeSpawner.extra_resource_limits](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_resource_limits). + fsGid: + type: [integer, "null"] + description: | + Passthrough configuration for + [KubeSpawner.fs_gid](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.fs_gid). + lifecycleHooks: + type: object + additionalProperties: false + description: | + Passthrough configuration for + [KubeSpawner.lifecycle_hooks](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.lifecycle_hooks). + properties: + postStart: + type: object + additionalProperties: true + preStop: + type: object + additionalProperties: true + networkTools: + type: object + additionalProperties: false + description: | + This configuration section refers to configuration of a conditionally + created initContainer for the user pods with a purpose to block a + specific IP address. + + This initContainer will be created if + [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables) + is set to true. + properties: + image: *image-spec + resources: *resources-spec + # FIXME: name mismatch, named service_account in kubespawner + serviceAccountName: + type: [string, "null"] + description: | + Passthrough configuration for + [KubeSpawner.service_account](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.service_account). + startTimeout: + type: [integer, "null"] + description: | + Passthrough configuration for + [KubeSpawner.start_timeout](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.start_timeout). 
+ storage: + type: object + additionalProperties: false + required: [type, homeMountPath] + description: | + This section configures KubeSpawner directly to some extent but also + indirectly through Helm chart specific configuration options such as + [`singleuser.storage.type`](schema_singleuser.storage.type). + properties: + capacity: + type: [string, "null"] + description: | + Configures `KubeSpawner.storage_capacity`. + + See the [KubeSpawner + documentation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html) + for more information. + dynamic: + type: object + additionalProperties: false + properties: + pvcNameTemplate: + type: [string, "null"] + description: | + Configures `KubeSpawner.pvc_name_template` which will be the + resource name of the PVC created by KubeSpawner for each user + if needed. + storageAccessModes: + type: array + items: + type: [string, "null"] + description: | + Configures `KubeSpawner.storage_access_modes`. + + See KubeSpawners documentation and [the k8s + documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) + for more information. + storageClass: + type: [string, "null"] + description: | + Configures `KubeSpawner.storage_class`, which can be an + explicit StorageClass to dynamically provision storage for the + PVC that KubeSpawner will create. + + There is of a default StorageClass available in k8s clusters + for use if this is unspecified. + volumeNameTemplate: + type: [string, "null"] + description: | + Configures `KubeSpawner.volume_name_template`, which is the + name to reference from the containers volumeMounts section. + extraLabels: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + Configures `KubeSpawner.storage_extra_labels`. Note that these + labels are set on the PVC during creation only and won't be + updated after creation. 
+ extraVolumeMounts: *extraVolumeMounts-spec + extraVolumes: *extraVolumes-spec + homeMountPath: + type: string + description: | + The location within the container where the home folder storage + should be mounted. + static: + type: object + additionalProperties: false + properties: + pvcName: + type: [string, "null"] + description: | + Configures `KubeSpawner.pvc_claim_name` to reference + pre-existing storage. + subPath: + type: [string, "null"] + description: | + Configures the `subPath` field of a + `KubeSpawner.volume_mounts` entry added by the Helm chart. + + Path within the volume from which the container's volume + should be mounted. + type: + enum: [dynamic, static, none] + description: | + Decide if you want storage to be provisioned dynamically + (dynamic), or if you want to attach existing storage (static), or + don't want any storage to be attached (none). + allowPrivilegeEscalation: + type: [boolean, "null"] + description: | + Passthrough configuration for + [KubeSpawner.allow_privilege_escalation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.allow_privilege_escalation). + uid: + type: [integer, "null"] + description: | + Passthrough configuration for + [KubeSpawner.uid](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.uid). + + This dictates as what user the main container will start up as. + + As an example of when this is needed, consider if you want to enable + sudo rights for some of your users. This can be done by starting up as + root, enabling it from the container in a startup script, and then + transitioning to the normal user. + + Default is 1000, set to null to use the container's default. + + scheduling: + type: object + additionalProperties: false + description: | + Objects for customizing the scheduling of various pods on the nodes and + related labels. 
+ properties: + userScheduler: + type: object + additionalProperties: false + required: [enabled, plugins, pluginConfig, logLevel] + description: | + The user scheduler is making sure that user pods are scheduled + tight on nodes, this is useful for autoscaling of user node pools. + properties: + enabled: + type: boolean + description: | + Enables the user scheduler. + revisionHistoryLimit: *revisionHistoryLimit + replicas: + type: integer + description: | + You can have multiple schedulers to share the workload or improve + availability on node failure. + image: *image-spec + pdb: *pdb-spec + nodeSelector: *nodeSelector-spec + tolerations: *tolerations-spec + labels: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + Extra labels to add to the userScheduler pods. + + See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) + to learn more about labels. + annotations: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + Extra annotations to add to the user-scheduler pods. + containerSecurityContext: *containerSecurityContext-spec + logLevel: + type: integer + description: | + Corresponds to the verbosity level of logging made by the + kube-scheduler binary running within the user-scheduler pod. + plugins: + type: object + additionalProperties: true + description: | + These plugins refers to kube-scheduler plugins as documented + [here](https://kubernetes.io/docs/reference/scheduling/config/). + + The user-scheduler is really just a kube-scheduler configured in a + way to pack users tight on nodes using these plugins. See + values.yaml for information about the default plugins. + pluginConfig: + type: array + description: | + Individually activated plugins can be configured further. 
+          resources: *resources-spec
+          serviceAccount: *serviceAccount
+          extraPodSpec: *extraPodSpec-spec
+      podPriority:
+        type: object
+        additionalProperties: false
+        description: |
+          Pod Priority is used to allow real users to evict user placeholder pods
+          that in turn by entering a Pending state can trigger a scale up by a
+          cluster autoscaler.
+
+          Having this option enabled only makes sense if the following conditions
+          are met:
+
+          1. A cluster autoscaler is installed.
+          2. user-placeholder pods are configured to have a priority equal or
+             higher than the cluster autoscaler's "priority cutoff" so that the
+             cluster autoscaler scales up a node in advance for a pending user
+             placeholder pod.
+          3. Normal user pods have a higher priority than the user-placeholder
+             pods.
+          4. Image puller pods have a priority between normal user pods and
+             user-placeholder pods.
+
+          Note that if the default priority cutoff is not configured on the cluster
+          autoscaler, it will currently default to 0, and that in the future
+          this is meant to be lowered. If your cloud provider is installing the
+          cluster autoscaler for you, they may also configure this specifically.
+
+          Recommended settings for a cluster autoscaler...
+
+          ... with a priority cutoff of -10 (GKE):
+
+          ```yaml
+          podPriority:
+            enabled: true
+            globalDefault: false
+            defaultPriority: 0
+            imagePullerPriority: -5
+            userPlaceholderPriority: -10
+          ```
+
+          ... with a priority cutoff of 0:
+
+          ```yaml
+          podPriority:
+            enabled: true
+            globalDefault: true
+            defaultPriority: 10
+            imagePullerPriority: 5
+            userPlaceholderPriority: 0
+          ```
+        properties:
+          enabled:
+            type: boolean
+          globalDefault:
+            type: boolean
+            description: |
+              Warning! This will influence all pods in the cluster.
+
+              The priority a pod usually gets is 0. But this can be overridden
+              with a PriorityClass resource if it is declared to be the global
+              default. This configuration option allows for the creation of such a
+              global default.
+          defaultPriority:
+            type: integer
+            description: |
+              The actual value for the default pod priority.
+          imagePullerPriority:
+            type: integer
+            description: |
+              The actual value for the [hook|continuous]-image-puller pods' priority.
+          userPlaceholderPriority:
+            type: integer
+            description: |
+              The actual value for the user-placeholder pods' priority.
+      userPlaceholder:
+        type: object
+        additionalProperties: false
+        description: |
+          User placeholders simulate users but will thanks to PodPriority be
+          evicted by the cluster autoscaler if a real user shows up. In this way
+          placeholders allow you to create a headroom for the real users and
+          reduce the risk of a user having to wait for a node to be added. Be
+          sure to use the continuous image puller as well along with
+          placeholders, so the images are also available when real users arrive.
+
+          To test your setup efficiently, you can adjust the amount of user
+          placeholders with the following command:
+          ```sh
+          # Configure to have 3 user placeholders
+          kubectl scale sts/user-placeholder --replicas=3
+          ```
+        properties:
+          enabled:
+            type: boolean
+          image: *image-spec
+          revisionHistoryLimit: *revisionHistoryLimit
+          replicas:
+            type: integer
+            description: |
+              How many placeholder pods would you like to have?
+          labels:
+            type: object
+            additionalProperties: false
+            patternProperties: *labels-and-annotations-patternProperties
+            description: |
+              Extra labels to add to the userPlaceholder pods.
+
+              See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+              to learn more about labels.
+          annotations:
+            type: object
+            additionalProperties: false
+            patternProperties: *labels-and-annotations-patternProperties
+            description: |
+              Extra annotations to add to the placeholder pods.
+          resources:
+            type: object
+            additionalProperties: true
+            description: |
+              Unless specified here, the placeholder pods will request the same
+              resources specified for the real singleuser pods.
+          containerSecurityContext: *containerSecurityContext-spec
+      corePods:
+        type: object
+        additionalProperties: false
+        description: |
+          These settings influence the core pods like the hub, proxy and
+          user-scheduler pods.
+          These settings influence all pods considered core pods, namely:
+
+          - hub
+          - proxy
+          - autohttps
+          - hook-image-awaiter
+          - user-scheduler
+
+          By default, the tolerations are:
+
+          - hub.jupyter.org/dedicated=core:NoSchedule
+          - hub.jupyter.org_dedicated=core:NoSchedule
+
+          Note that tolerations set here are combined with the respective
+          components' dedicated tolerations, and that `_` is available in case
+          `/` isn't allowed in the cloud's tolerations.
+        properties:
+          tolerations: *tolerations-spec
+          nodeAffinity:
+            type: object
+            additionalProperties: false
+            description: |
+              Where should pods be scheduled? Perhaps on nodes with a certain
+              label is preferred or even required?
+            properties:
+              matchNodePurpose:
+                enum: [ignore, prefer, require]
+                description: |
+                  Decide if core pods *ignore*, *prefer* or *require* to
+                  schedule on nodes with this label:
+                  ```
+                  hub.jupyter.org/node-purpose=core
+                  ```
+      userPods:
+        type: object
+        additionalProperties: false
+        description: |
+          These settings influence all pods considered user pods, namely:
+
+          - user-placeholder
+          - hook-image-puller
+          - continuous-image-puller
+          - jupyter-
+
+          By default, the tolerations are:
+
+          - hub.jupyter.org/dedicated=user:NoSchedule
+          - hub.jupyter.org_dedicated=user:NoSchedule
+
+          Note that tolerations set here are combined with the respective
+          components' dedicated tolerations, and that `_` is available in case
+          `/` isn't allowed in the cloud's tolerations.
+        properties:
+          tolerations: *tolerations-spec
+          nodeAffinity:
+            type: object
+            additionalProperties: false
+            description: |
+              Where should pods be scheduled? Perhaps on nodes with a certain
+              label is preferred or even required?
+ properties: + matchNodePurpose: + enum: [ignore, prefer, require] + description: | + Decide if user pods *ignore*, *prefer* or *require* to + schedule on nodes with this label: + ``` + hub.jupyter.org/node-purpose=user + ``` + + ingress: + type: object + additionalProperties: false + required: [enabled] + properties: + enabled: + type: boolean + description: | + Enable the creation of a Kubernetes Ingress to proxy-public service. + + See [Advanced Topics — Zero to JupyterHub with Kubernetes + 0.7.0 documentation](ingress) + for more details. + annotations: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + Annotations to apply to the Ingress resource. + + See [the Kubernetes + documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + for more details about annotations. + ingressClassName: + type: [string, "null"] + description: | + Maps directly to the Ingress resource's `spec.ingressClassName``. + + See [the Kubernetes + documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) + for more details. + hosts: + type: array + description: | + List of hosts to route requests to the proxy. + pathSuffix: + type: [string, "null"] + description: | + Suffix added to Ingress's routing path pattern. + + Specify `*` if your ingress matches path by glob pattern. + pathType: + enum: [Prefix, Exact, ImplementationSpecific] + description: | + The path type to use. The default value is 'Prefix'. + + See [the Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types) + for more details about path types. + tls: + type: array + description: | + TLS configurations for Ingress. + + See [the Kubernetes + documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) + for more details about annotations. 
+ + prePuller: + type: object + additionalProperties: false + required: [hook, continuous] + properties: + revisionHistoryLimit: *revisionHistoryLimit + labels: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + Extra labels to add to the pre puller job pods. + + See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) + to learn more about labels. + annotations: + type: object + additionalProperties: false + patternProperties: *labels-and-annotations-patternProperties + description: | + Annotations to apply to the hook and continous image puller pods. One example use case is to + disable istio sidecars which could interfere with the image pulling. + resources: + type: object + additionalProperties: true + description: | + These are standard Kubernetes resources with requests and limits for + cpu and memory. They will be used on the containers in the pods + pulling images. These should be set extremely low as the containers + shut down directly or is a pause container that just idles. + + They were made configurable as usage of ResourceQuota may require + containers in the namespace to have explicit resources set. + extraTolerations: *tolerations-spec + hook: + type: object + additionalProperties: false + required: [enabled] + description: | + See the [*optimization + section*](pulling-images-before-users-arrive) + for more details. + properties: + enabled: + type: boolean + pullOnlyOnChanges: + type: boolean + description: | + Pull only if changes have been made to the images to pull, or more + accurately if the hook-image-puller daemonset has changed in any + way. + podSchedulingWaitDuration: + description: | + The `hook-image-awaiter` has a criteria to await all the + `hook-image-puller` DaemonSet's pods to both schedule and finish + their image pulling. 
This flag can be used to relax this criteria
+              to instead only await the pods that _have already scheduled_ to
+              finish image pulling after a certain duration.
+
+              The value of this is that sometimes the newly created
+              `hook-image-puller` pods cannot be scheduled because nodes are
+              full, and then it probably won't make sense to block a `helm
+              upgrade`.
+
+              An infinite duration to wait for pods to schedule can be
+              represented by `-1`. This was the default behavior of version
+              0.9.0 and earlier.
+            type: integer
+          nodeSelector: *nodeSelector-spec
+          tolerations: *tolerations-spec
+          containerSecurityContext: *containerSecurityContext-spec
+          image: *image-spec
+          resources: *resources-spec
+          serviceAccount: *serviceAccount
+      continuous:
+        type: object
+        additionalProperties: false
+        required: [enabled]
+        description: |
+          See the [*optimization
+          section*](pulling-images-before-users-arrive)
+          for more details.
+
+          ```{note}
+          If used with a Cluster Autoscaler (an autoscaling node pool), also add
+          user-placeholders and enable pod priority.
+          ```
+        properties:
+          enabled:
+            type: boolean
+      pullProfileListImages:
+        type: boolean
+        description: |
+          The singleuser.profileList configuration can provide a selection of
+          images. This option determines if all images identified there should
+          be pulled, both by the hook and continuous pullers.
+
+          Images are looked for under `kubespawner_override`, and also
+          `profile_options.choices.kubespawner_override` since version 3.2.0.
+
+          The reason to disable this is that if you have for example 10 images
+          which start pulling in order from 1 to 10, a user that arrives and
+          wants to start a pod with image number 10 will need to wait for all
+          images to be pulled, and then it may be preferable to just let the
+          user arriving wait for a single image to be pulled on arrival.
+      extraImages:
+        type: object
+        additionalProperties: false
+        description: |
+          See the [*optimization section*](images-that-will-be-pulled) for more
+          details.
+ + ```yaml + prePuller: + extraImages: + my-extra-image-i-want-pulled: + name: jupyter/all-spark-notebook + tag: 2343e33dec46 + ``` + patternProperties: + ".*": + type: object + additionalProperties: false + required: [name, tag] + properties: + name: + type: string + tag: + type: string + containerSecurityContext: *containerSecurityContext-spec + pause: + type: object + additionalProperties: false + description: | + The image-puller pods rely on initContainer to pull all images, and + their actual container when they are done is just running a `pause` + container. These are settings for that pause container. + properties: + containerSecurityContext: *containerSecurityContext-spec + image: *image-spec + + custom: + type: object + additionalProperties: true + description: | + Additional values to pass to the Hub. + JupyterHub will not itself look at these, + but you can read values in your own custom config via `hub.extraConfig`. + For example: + + ```yaml + custom: + myHost: "https://example.horse" + hub: + extraConfig: + myConfig.py: | + c.MyAuthenticator.host = get_config("custom.myHost") + ``` + + cull: + type: object + additionalProperties: false + required: [enabled] + description: | + The + [jupyterhub-idle-culler](https://github.com/jupyterhub/jupyterhub-idle-culler) + can run as a JupyterHub managed service to _cull_ running servers. + properties: + enabled: + type: boolean + description: | + Enable/disable use of jupyter-idle-culler. + users: + type: [boolean, "null"] + description: See the `--cull-users` flag. + adminUsers: + type: [boolean, "null"] + description: See the `--cull-admin-users` flag. + removeNamedServers: + type: [boolean, "null"] + description: See the `--remove-named-servers` flag. + timeout: + type: [integer, "null"] + description: See the `--timeout` flag. + every: + type: [integer, "null"] + description: See the `--cull-every` flag. + concurrency: + type: [integer, "null"] + description: See the `--concurrency` flag. 
+ maxAge: + type: [integer, "null"] + description: See the `--max-age` flag. + + debug: + type: object + additionalProperties: false + required: [enabled] + properties: + enabled: + type: boolean + description: | + Increases the loglevel throughout the resources in the Helm chart. + + rbac: + type: object + additionalProperties: false + required: [create] + properties: + enabled: + type: boolean + # This schema entry is needed to help us print a more helpful error + # message in NOTES.txt if hub.fsGid is set. + # + description: | + ````{note} + Removed in version 2.0.0. If you have been using `rbac.enable=false` + (strongly discouraged), then the equivalent configuration would be: + + ```yaml + rbac: + create: false + hub: + serviceAccount: + create: false + proxy: + traefik: + serviceAccount: + create: false + scheduling: + userScheduler: + serviceAccount: + create: false + prePuller: + hook: + serviceAccount: + create: false + ``` + ```` + create: + type: boolean + description: | + Decides if (Cluster)Role and (Cluster)RoleBinding resources are + created and bound to the configured serviceAccounts. + + global: + type: object + additionalProperties: true + properties: + safeToShowValues: + type: boolean + description: | + A flag that should only be set to true temporarily when experiencing a + deprecation message that contain censored content that you wish to + reveal. diff --git a/deploy-as-code/helm/charts/backbone-services/jupyterhub/values.yaml b/deploy-as-code/helm/charts/backbone-services/jupyterhub/values.yaml new file mode 100644 index 000000000..eccbc7606 --- /dev/null +++ b/deploy-as-code/helm/charts/backbone-services/jupyterhub/values.yaml @@ -0,0 +1,681 @@ +# fullnameOverride and nameOverride distinguishes blank strings, null values, +# and non-blank strings. For more details, see the configuration reference. 
+fullnameOverride: "jupyter"
+nameOverride:
+namespace: jupyterhub
+# enabled is ignored by the jupyterhub chart itself, but a chart depending on
+# the jupyterhub chart conditionally can make use of this config option as the
+# condition.
+enabled:
+
+# custom can contain anything you want to pass to the hub pod, as all passed
+# Helm template values will be made available there.
+custom: {}
+
+# imagePullSecret is configuration to create a k8s Secret that Helm chart's pods
+# can get credentials from to pull their images.
+imagePullSecret:
+  create: false
+  automaticReferenceInjection: true
+  registry:
+  username:
+  password:
+  email:
+# imagePullSecrets is configuration to reference the k8s Secret resources the
+# Helm chart's pods can get credentials from to pull their images.
+imagePullSecrets: []
+
+# hub relates to the hub pod, responsible for running JupyterHub, its configured
+# Authenticator class KubeSpawner, and its configured Proxy class
+# ConfigurableHTTPProxy. KubeSpawner creates the user pods, and
+# ConfigurableHTTPProxy speaks with the actual ConfigurableHTTPProxy server in
+# the proxy pod.
+hub:
+  revisionHistoryLimit:
+  config:
+    Authenticator:
+      admin_users:
+        - admin-user
+    NativeAuthenticator:
+      minimum_password_length: 8
+      check_common_password: false
+      allowed_failed_logins: 5
+      seconds_before_next_try: 300
+    JupyterHub:
+      authenticator_class: "nativeauthenticator.NativeAuthenticator"
+      admin_access: true
+  service:
+    type: ClusterIP
+    annotations: {}
+    ports:
+      nodePort:
+    extraPorts: []
+    loadBalancerIP:
+  baseUrl: /jupyter
+  cookieSecret:
+  initContainers: []
+  nodeSelector: {}
+  tolerations: []
+  concurrentSpawnLimit: 64
+  consecutiveFailureLimit: 5
+  activeServerLimit:
+  deploymentStrategy:
+    ## type: Recreate
+    ## - sqlite-pvc backed hubs require the Recreate deployment strategy as a
+    ##   typical PVC storage can only be bound to one pod at a time.
+    ## - JupyterHub isn't designed to support being run in parallel.
More work + ## needs to be done in JupyterHub itself for a fully highly available (HA) + ## deployment of JupyterHub on k8s is to be possible. + type: Recreate + db: + type: sqlite-pvc + upgrade: + pvc: + annotations: {} + selector: {} + accessModes: + - ReadWriteOnce + storage: 1Gi + subPath: + storageClassName: + url: + password: + labels: {} + annotations: {} + command: [] + args: [] + extraConfig: {} + extraFiles: {} + extraEnv: {} + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + image: + name: quay.io/jupyterhub/k8s-hub + tag: "3.3.6" + pullPolicy: + pullSecrets: [] + resources: {} + podSecurityContext: + fsGroup: 1000 + containerSecurityContext: + runAsUser: 1000 + runAsGroup: 1000 + allowPrivilegeEscalation: false + lifecycle: {} + loadRoles: {} + services: {} + pdb: + enabled: false + maxUnavailable: + minAvailable: 1 + networkPolicy: + enabled: true + ingress: [] + egress: [] + egressAllowRules: + cloudMetadataServer: true + dnsPortsCloudMetadataServer: true + dnsPortsKubeSystemNamespace: true + dnsPortsPrivateIPs: true + nonPrivateIPs: true + privateIPs: true + interNamespaceAccessLabels: ignore + allowedIngressPorts: [] + allowNamedServers: false + namedServerLimitPerUser: + authenticatePrometheus: + redirectToServer: + shutdownOnLogout: + templatePaths: [] + templateVars: {} + livenessProbe: + # The livenessProbe's aim to give JupyterHub sufficient time to startup but + # be able to restart if it becomes unresponsive for ~5 min. + enabled: true + initialDelaySeconds: 300 + periodSeconds: 10 + failureThreshold: 30 + timeoutSeconds: 3 + readinessProbe: + # The readinessProbe's aim is to provide a successful startup indication, + # but following that never become unready before its livenessProbe fail and + # restarts it if needed. To become unready following startup serves no + # purpose as there are no other pod to fallback to in our non-HA deployment. 
+ enabled: true + initialDelaySeconds: 0 + periodSeconds: 2 + failureThreshold: 1000 + timeoutSeconds: 1 + existingSecret: + serviceAccount: + create: true + name: + annotations: {} + extraPodSpec: {} + +rbac: + create: true + +# proxy relates to the proxy pod, the proxy-public service, and the autohttps +# pod and proxy-http service. +proxy: + secretToken: '10e3d5d41c6f1772f394d8fab98e13731ad2b28bdfb6114cfaf58bde5030a40e' + annotations: {} + deploymentStrategy: + ## type: Recreate + ## - JupyterHub's interaction with the CHP proxy becomes a lot more robust + ## with this configuration. To understand this, consider that JupyterHub + ## during startup will interact a lot with the k8s service to reach a + ## ready proxy pod. If the hub pod during a helm upgrade is restarting + ## directly while the proxy pod is making a rolling upgrade, the hub pod + ## could end up running a sequence of interactions with the old proxy pod + ## and finishing up the sequence of interactions with the new proxy pod. + ## As CHP proxy pods carry individual state this is very error prone. One + ## outcome when not using Recreate as a strategy has been that user pods + ## have been deleted by the hub pod because it considered them unreachable + ## as it only configured the old proxy pod but not the new before trying + ## to reach them. + type: Recreate + ## rollingUpdate: + ## - WARNING: + ## This is required to be set explicitly blank! 
Without it being + ## explicitly blank, k8s will let eventual old values under rollingUpdate + ## remain and then the Deployment becomes invalid and a helm upgrade would + ## fail with an error like this: + ## + ## UPGRADE FAILED + ## Error: Deployment.apps "proxy" is invalid: spec.strategy.rollingUpdate: Forbidden: may not be specified when strategy `type` is 'Recreate' + ## Error: UPGRADE FAILED: Deployment.apps "proxy" is invalid: spec.strategy.rollingUpdate: Forbidden: may not be specified when strategy `type` is 'Recreate' + rollingUpdate: + # service relates to the proxy-public service + service: + type: ClusterIP + labels: {} + annotations: {} + nodePorts: + http: + https: + disableHttpPort: false + extraPorts: [] + loadBalancerIP: + loadBalancerSourceRanges: [] + # chp relates to the proxy pod, which is responsible for routing traffic based + # on dynamic configuration sent from JupyterHub to CHP's REST API. + chp: + revisionHistoryLimit: + containerSecurityContext: + runAsUser: 65534 # nobody user + runAsGroup: 65534 # nobody group + allowPrivilegeEscalation: false + image: + name: quay.io/jupyterhub/configurable-http-proxy + # tag is automatically bumped to new patch versions by the + # watch-dependencies.yaml workflow. 
+ # + tag: "4.6.1" # https://github.com/jupyterhub/configurable-http-proxy/tags + pullPolicy: + pullSecrets: [] + extraCommandLineFlags: [] + livenessProbe: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 10 + failureThreshold: 30 + timeoutSeconds: 3 + readinessProbe: + enabled: true + initialDelaySeconds: 0 + periodSeconds: 2 + failureThreshold: 1000 + timeoutSeconds: 1 + resources: {} + defaultTarget: + errorTarget: + extraEnv: {} + nodeSelector: {} + tolerations: [] + networkPolicy: + enabled: true + ingress: [] + egress: [] + egressAllowRules: + cloudMetadataServer: true + dnsPortsCloudMetadataServer: true + dnsPortsKubeSystemNamespace: true + dnsPortsPrivateIPs: true + nonPrivateIPs: true + privateIPs: true + interNamespaceAccessLabels: ignore + allowedIngressPorts: [http, https] + pdb: + enabled: false + maxUnavailable: + minAvailable: 1 + extraPodSpec: {} + # traefik relates to the autohttps pod, which is responsible for TLS + # termination when proxy.https.type=letsencrypt. + traefik: + revisionHistoryLimit: + containerSecurityContext: + runAsUser: 65534 # nobody user + runAsGroup: 65534 # nobody group + allowPrivilegeEscalation: false + image: + name: traefik + # tag is automatically bumped to new patch versions by the + # watch-dependencies.yaml workflow. 
+ # + tag: "v2.11.0" # ref: https://hub.docker.com/_/traefik?tab=tags + pullPolicy: + pullSecrets: [] + hsts: + includeSubdomains: false + preload: false + maxAge: 15724800 # About 6 months + resources: {} + labels: {} + extraInitContainers: [] + extraEnv: {} + extraVolumes: [] + extraVolumeMounts: [] + extraStaticConfig: {} + extraDynamicConfig: {} + nodeSelector: {} + tolerations: [] + extraPorts: [] + networkPolicy: + enabled: true + ingress: [] + egress: [] + egressAllowRules: + cloudMetadataServer: true + dnsPortsCloudMetadataServer: true + dnsPortsKubeSystemNamespace: true + dnsPortsPrivateIPs: true + nonPrivateIPs: true + privateIPs: true + interNamespaceAccessLabels: ignore + allowedIngressPorts: [http, https] + pdb: + enabled: false + maxUnavailable: + minAvailable: 1 + serviceAccount: + create: true + name: + annotations: {} + extraPodSpec: {} + secretSync: + containerSecurityContext: + runAsUser: 65534 # nobody user + runAsGroup: 65534 # nobody group + allowPrivilegeEscalation: false + image: + name: quay.io/jupyterhub/k8s-secret-sync + tag: "3.3.6" + pullPolicy: + pullSecrets: [] + resources: {} + labels: {} + https: + enabled: false + type: letsencrypt + #type: letsencrypt, manual, offload, secret + letsencrypt: + contactEmail: + # Specify custom server here (https://acme-staging-v02.api.letsencrypt.org/directory) to hit staging LE + acmeServer: https://acme-v02.api.letsencrypt.org/directory + manual: + key: + cert: + secret: + name: + key: tls.key + crt: tls.crt + hosts: [] + +# singleuser relates to the configuration of KubeSpawner which runs in the hub +# pod, and its spawning of user pods such as jupyter-myusername. 
+singleuser: + podNameTemplate: + extraTolerations: [] + nodeSelector: {} + extraNodeAffinity: + required: [] + preferred: [] + extraPodAffinity: + required: [] + preferred: [] + extraPodAntiAffinity: + required: [] + preferred: [] + networkTools: + image: + name: quay.io/jupyterhub/k8s-network-tools + tag: "3.3.6" + pullPolicy: + pullSecrets: [] + resources: {} + cloudMetadata: + # block set to true will append a privileged initContainer using the + # iptables to block the sensitive metadata server at the provided ip. + blockWithIptables: false + ip: 169.254.169.254 + networkPolicy: + enabled: false + ingress: [] + egress: [] + egressAllowRules: + cloudMetadataServer: true + dnsPortsCloudMetadataServer: true + dnsPortsKubeSystemNamespace: true + dnsPortsPrivateIPs: true + nonPrivateIPs: true + privateIPs: true + interNamespaceAccessLabels: ignore + allowedIngressPorts: [] + events: true + extraAnnotations: {} + extraLabels: + hub.jupyter.org/network-access-hub: "true" + extraFiles: {} + extraEnv: {} + lifecycleHooks: {} + initContainers: [] + extraContainers: [] + allowPrivilegeEscalation: false + uid: 1000 + fsGid: 100 + serviceAccountName: + storage: + type: dynamic + extraLabels: {} + extraVolumes: [] + extraVolumeMounts: [] + static: + pvcName: + subPath: "{username}" + capacity: 10Gi + homeMountPath: /home/jovyan + dynamic: + storageClass: + pvcNameTemplate: claim-{username}{servername} + volumeNameTemplate: volume-{username}{servername} + storageAccessModes: [ReadWriteOnce] + image: + name: docker.io/egovio/jhub-singleuser + tag: "20240403-debug" + pullPolicy: + pullSecrets: [] + startTimeout: 300 + cpu: + limit: + guarantee: + memory: + limit: + guarantee: 1G + extraResource: + limits: {} + guarantees: {} + cmd: jupyterhub-singleuser + defaultUrl: + extraPodConfig: {} + profileList: [] + +# scheduling relates to the user-scheduler pods and user-placeholder pods. 
+scheduling: + userScheduler: + enabled: true + revisionHistoryLimit: + replicas: 1 + logLevel: 4 + # plugins are configured on the user-scheduler to make us score how we + # schedule user pods in a way to help us schedule on the most busy node. By + # doing this, we help scale down more effectively. It isn't obvious how to + # enable/disable scoring plugins, and configure them, to accomplish this. + # + # plugins ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins-1 + # migration ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduler-configuration-migrations + # + plugins: + score: + # These scoring plugins are enabled by default according to + # https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins + # 2022-02-22. + # + # Enabled with high priority: + # - NodeAffinity + # - InterPodAffinity + # - NodeResourcesFit + # - ImageLocality + # Remains enabled with low default priority: + # - TaintToleration + # - PodTopologySpread + # - VolumeBinding + # Disabled for scoring: + # - NodeResourcesBalancedAllocation + # + disabled: + # We disable these plugins (with regards to scoring) to not interfere + # or complicate our use of NodeResourcesFit. + - name: NodeResourcesBalancedAllocation + # Disable plugins to be allowed to enable them again with a different + # weight and avoid an error. + - name: NodeAffinity + - name: InterPodAffinity + - name: NodeResourcesFit + - name: ImageLocality + enabled: + - name: NodeAffinity + weight: 14631 + - name: InterPodAffinity + weight: 1331 + - name: NodeResourcesFit + weight: 121 + - name: ImageLocality + weight: 11 + pluginConfig: + # Here we declare that we should optimize pods to fit based on a + # MostAllocated strategy instead of the default LeastAllocated. 
- name: NodeResourcesFit
+        args:
+          scoringStrategy:
+            resources:
+              - name: cpu
+                weight: 1
+              - name: memory
+                weight: 1
+            type: MostAllocated
+    containerSecurityContext:
+      runAsUser: 65534 # nobody user
+      runAsGroup: 65534 # nobody group
+      allowPrivilegeEscalation: false
+    image:
+      # IMPORTANT: Bumping the minor version of this binary should go hand in
+      #            hand with an inspection of the user-scheduler's RBAC resources
+      #            that we have forked in
+      #            templates/scheduling/user-scheduler/rbac.yaml.
+      #
+      # Debugging advice:
+      #
+      # - Is configuration of kube-scheduler broken in
+      #   templates/scheduling/user-scheduler/configmap.yaml?
+      #
+      # - Is the kube-scheduler binary's compatibility to work
+      #   against a k8s api-server that is too new or too old?
+      #
+      # - You can update the GitHub workflow that runs tests to
+      #   include "deploy/user-scheduler" in the k8s namespace report
+      #   and reduce the user-scheduler deployments replicas to 1 in
+      #   dev-config.yaml to get relevant logs from the user-scheduler
+      #   pods. Inspect the "Kubernetes namespace report" action!
+      #
+      # - Typical failures are that kube-scheduler fails to search for
+      #   resources via its "informers", and won't start trying to
+      #   schedule pods before they succeed which may require
+      #   additional RBAC permissions or that the k8s api-server is
+      #   aware of the resources.
+      #
+      # - If "successfully acquired lease" can be seen in the logs, it
+      #   is a good sign kube-scheduler is ready to schedule pods.
+      #
+      name: registry.k8s.io/kube-scheduler
+      # tag is automatically bumped to new patch versions by the
+      # watch-dependencies.yaml workflow. The minor version is pinned in the
+      # workflow, and should be updated there if a minor version bump is done
+      # here. We aim to stay around 1 minor version behind the latest k8s
+      # version.
+ # + tag: "v1.26.15" # ref: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG + pullPolicy: + pullSecrets: [] + nodeSelector: {} + tolerations: [] + labels: {} + annotations: {} + pdb: + enabled: true + maxUnavailable: 1 + minAvailable: + resources: {} + serviceAccount: + create: true + name: + annotations: {} + extraPodSpec: {} + podPriority: + enabled: false + globalDefault: false + defaultPriority: 0 + imagePullerPriority: -5 + userPlaceholderPriority: -10 + userPlaceholder: + enabled: true + image: + name: registry.k8s.io/pause + # tag is automatically bumped to new patch versions by the + # watch-dependencies.yaml workflow. + # + # If you update this, also update prePuller.pause.image.tag + # + tag: "3.9" + pullPolicy: + pullSecrets: [] + revisionHistoryLimit: + replicas: 0 + labels: {} + annotations: {} + containerSecurityContext: + runAsUser: 65534 # nobody user + runAsGroup: 65534 # nobody group + allowPrivilegeEscalation: false + resources: {} + corePods: + tolerations: + - key: hub.jupyter.org/dedicated + operator: Equal + value: core + effect: NoSchedule + - key: hub.jupyter.org_dedicated + operator: Equal + value: core + effect: NoSchedule + nodeAffinity: + matchNodePurpose: prefer + userPods: + tolerations: + - key: hub.jupyter.org/dedicated + operator: Equal + value: user + effect: NoSchedule + - key: hub.jupyter.org_dedicated + operator: Equal + value: user + effect: NoSchedule + nodeAffinity: + matchNodePurpose: prefer + +# prePuller relates to the hook|continuous-image-puller DaemonsSets +prePuller: + revisionHistoryLimit: + labels: {} + annotations: {} + resources: {} + containerSecurityContext: + runAsUser: 65534 # nobody user + runAsGroup: 65534 # nobody group + allowPrivilegeEscalation: false + extraTolerations: [] + # hook relates to the hook-image-awaiter Job and hook-image-puller DaemonSet + hook: + enabled: false + pullOnlyOnChanges: true + # image and the configuration below relates to the hook-image-awaiter Job + image: + 
name: quay.io/jupyterhub/k8s-image-awaiter + tag: "3.3.6" + pullPolicy: + pullSecrets: [] + containerSecurityContext: + runAsUser: 65534 # nobody user + runAsGroup: 65534 # nobody group + allowPrivilegeEscalation: false + podSchedulingWaitDuration: 10 + nodeSelector: {} + tolerations: [] + resources: {} + serviceAccount: + create: true + name: + annotations: {} + continuous: + enabled: true + pullProfileListImages: true + extraImages: {} + pause: + containerSecurityContext: + runAsUser: 65534 # nobody user + runAsGroup: 65534 # nobody group + allowPrivilegeEscalation: false + image: + name: registry.k8s.io/pause + # tag is automatically bumped to new patch versions by the + # watch-dependencies.yaml workflow. + # + # If you update this, also update scheduling.userPlaceholder.image.tag + # + tag: "3.9" + pullPolicy: + pullSecrets: [] + +ingress: + enabled: false + annotations: {} + ingressClassName: nginx + pathSuffix: hub + userPathSuffix: user(/|$)(.*) + pathType: Prefix + +# cull relates to the jupyterhub-idle-culler service, responsible for evicting +# inactive singleuser pods. 
+# +# The configuration below, except for enabled, corresponds to command-line flags +# for jupyterhub-idle-culler as documented here: +# https://github.com/jupyterhub/jupyterhub-idle-culler#as-a-standalone-script +# +cull: + enabled: true + users: false # --cull-users + adminUsers: true # --cull-admin-users + removeNamedServers: false # --remove-named-servers + timeout: 3600 # --timeout + every: 600 # --cull-every + concurrency: 10 # --concurrency + maxAge: 0 # --max-age + +debug: + enabled: false + +global: + safeToShowValues: false diff --git a/deploy-as-code/helm/environments/mukta-prod.yaml b/deploy-as-code/helm/environments/mukta-prod.yaml index 86cc4b55f..4935661e4 100644 --- a/deploy-as-code/helm/environments/mukta-prod.yaml +++ b/deploy-as-code/helm/environments/mukta-prod.yaml @@ -1086,4 +1086,26 @@ elasticsearch-master-v8: kibana-v8: image: - tag: 8.11.3 \ No newline at end of file + tag: 8.11.3 + +jupyterhub: + hub: + config: + Authenticator: + admin_users: + - muktaprodadmin + NativeAuthenticator: + minimum_password_length: 8 + check_common_password: false + allowed_failed_logins: 5 + seconds_before_next_try: 300 + JupyterHub: + admin_access: true + ingress: + enabled: true + hosts: + - mukta.odisha.gov.in + tls: + - hosts: + - mukta.odisha.gov.in + secretName: mukta.odisha.gov.in-tls-certs \ No newline at end of file diff --git a/deploy-as-code/helm/environments/mukta-uat.yaml b/deploy-as-code/helm/environments/mukta-uat.yaml index 945b2a136..41a872554 100644 --- a/deploy-as-code/helm/environments/mukta-uat.yaml +++ b/deploy-as-code/helm/environments/mukta-uat.yaml @@ -1016,3 +1016,25 @@ elasticsearch-master-v8: kibana-v8: image: tag: 8.11.3 + +jupyterhub: + hub: + config: + Authenticator: + admin_users: + - muktauatadmin + NativeAuthenticator: + minimum_password_length: 8 + check_common_password: false + allowed_failed_logins: 5 + seconds_before_next_try: 300 + JupyterHub: + admin_access: true + ingress: + enabled: true + hosts: + - 
mukta-uat.digit.org + tls: + - hosts: + - mukta-uat.digit.org + secretName: mukta-uat.digit.org-tls-certs