diff --git a/.dockerignore b/.dockerignore
index bec10c10c..d1386e98c 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,3 +1,4 @@
 ./.dapper
 ./.cache
 ./dist
+./examples/cache/testmount
diff --git a/.gitignore b/.gitignore
index a878e1d77..da9e45521 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,5 @@
 *.swp
 .idea
 .vscode/
+local-path-provisioner
+/examples/cache/testmount
diff --git a/README.md b/README.md
index 609a84402..38817e44c 100644
--- a/README.md
+++ b/README.md
@@ -105,7 +105,7 @@ Now you've verified that the provisioner works as expected.
 
 ### Customize the ConfigMap
 
-The configuration of the provisioner is a json file `config.json` and two bash scripts `setup` and `teardown`, stored in the a config map, e.g.:
+The configuration of the provisioner is a JSON file `config.json`, a Pod template `helper-pod.yaml`, and two shell scripts `setup` and `teardown`, e.g.:
 ```
 kind: ConfigMap
 apiVersion: v1
@@ -132,41 +132,11 @@ data:
     }
   setup: |-
     #!/bin/sh
-    while getopts "m:s:p:" opt
-    do
-        case $opt in
-            p)
-                absolutePath=$OPTARG
-                ;;
-            s)
-                sizeInBytes=$OPTARG
-                ;;
-            m)
-                volMode=$OPTARG
-                ;;
-        esac
-    done
-
-    mkdir -m 0777 -p ${absolutePath}
+    mkdir -m 0777 -p "$VOL_DIR"
   teardown: |-
     #!/bin/sh
-    while getopts "m:s:p:" opt
-    do
-        case $opt in
-            p)
-                absolutePath=$OPTARG
-                ;;
-            s)
-                sizeInBytes=$OPTARG
-                ;;
-            m)
-                volMode=$OPTARG
-                ;;
-        esac
-    done
-
-    rm -rf ${absolutePath}
-  helperPod.yaml: |-
+    rm -rf "$VOL_DIR"
+  helper-pod.yaml: |-
     apiVersion: v1
     kind: Pod
     metadata:
@@ -195,16 +165,26 @@ The configuration must obey following rules:
 3. No duplicate paths allowed for one node.
 4. No duplicate node allowed.
 
-#### Scripts `setup` and `teardown` and `helperPod.yaml`
+#### Scripts `setup` and `teardown` and the `helper-pod.yaml` template
 
-The script `setup` will be executed before the volume is created, to prepare the directory on the node for the volume.
+* The `setup` script is run before the volume is created, to prepare the volume directory on the node.
+* The `teardown` script is run after the volume is deleted, to clean up the volume directory on the node.
+* The `helper-pod.yaml` template is used to create a helper Pod that runs the `setup` or `teardown` script.
 
-The script `teardown` will be executed after the volume is deleted, to cleanup the directory on the node for the volume.
+The scripts receive their input as environment variables:
 
-The yaml file `helperPod.yaml` will be created by local-path-storage to execute `setup` or `teardown` script with three paramemters `-p <path> -s <size> -m <mode>` :
-* path: the absolute path provisioned on the node
-* size: pvc.Spec.resources.requests.storage in bytes
-* mode: pvc.Spec.VolumeMode
+| Environment variable | Description |
+| -------------------- | ----------- |
+| `VOL_DIR` | Volume directory that should be created or removed. |
+| `VOL_NAME` | Name of the PersistentVolume. |
+| `VOL_TYPE` | Type of the PersistentVolume (`Block` or `Filesystem`). |
+| `VOL_SIZE_BYTES` | Requested volume size in bytes. |
+| `PVC_NAME` | Name of the PersistentVolumeClaim. |
+| `PVC_NAMESPACE` | Namespace of the PersistentVolumeClaim. |
+| `PVC_ANNOTATION` | Value of the PersistentVolumeClaim annotation specified by the provisioner's `--pvc-annotation` option. |
+| `PVC_ANNOTATION_{SUFFIX}` | Value of a PersistentVolumeClaim annotation whose prefix matches the provisioner's `--pvc-annotation` option. The `SUFFIX` is the path within the annotation name after the `/`, upper-cased, with runs of non-alphanumeric characters replaced by `_`. E.g. 
if `local-path-provisioner` is run with `--pvc-annotation=storage.example.org` the PVC annotation `storage.example.org/cache-name` is passed through to the Pod as env var `PVC_ANNOTATION_CACHE_NAME`. If the helper Pod requires such an annotation `local-path-provisioner` can be run with e.g. `--pvc-annotation-required=storage.example.org/cache-name`. | + +Additional environment variables and defaults for the optional `PVC_ANNOTATION*` can be specified within the helper Pod template. #### Reloading diff --git a/debug/config.yaml b/debug/config.yaml index 893b46cd8..dc9a3a21e 100644 --- a/debug/config.yaml +++ b/debug/config.yaml @@ -39,41 +39,11 @@ data: } setup: |- #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - absolutePath=$OPTARG - ;; - s) - sizeInBytes=$OPTARG - ;; - m) - volMode=$OPTARG - ;; - esac - done - - mkdir -m 0777 -p ${absolutePath} + mkdir -m 0777 -p "$VOL_DIR" teardown: |- #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - absolutePath=$OPTARG - ;; - s) - sizeInBytes=$OPTARG - ;; - m) - volMode=$OPTARG - ;; - esac - done - - rm -rf ${absolutePath} - helperPod.yaml: |- + rm -rf "$VOL_DIR" + helper-pod.yaml: |- apiVersion: v1 kind: Pod metadata: diff --git a/deploy/chart/templates/configmap.yaml b/deploy/chart/templates/configmap.yaml index b02505387..e880213c7 100644 --- a/deploy/chart/templates/configmap.yaml +++ b/deploy/chart/templates/configmap.yaml @@ -13,6 +13,6 @@ data: {{ .Values.configmap.setup | nindent 4 }} teardown: |- {{ .Values.configmap.teardown | nindent 4 }} - helperPod.yaml: |- + helper-pod.yaml: |- {{ .Values.configmap.helperPod | nindent 4 }} diff --git a/deploy/chart/values.yaml b/deploy/chart/values.yaml index 5d3a8f10c..77d768c34 100644 --- a/deploy/chart/values.yaml +++ b/deploy/chart/values.yaml @@ -93,41 +93,10 @@ configmap: # specify the custom script for setup and teardown setup: |- #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - absolutePath=$OPTARG - ;; - s) - sizeInBytes=$OPTARG - ;; - m) - volMode=$OPTARG - ;; - esac - done - - mkdir -m 0777 -p ${absolutePath} + mkdir -m 0777 -p "$VOL_DIR" teardown: |- #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - absolutePath=$OPTARG - ;; - s) - sizeInBytes=$OPTARG - ;; - m) - volMode=$OPTARG - ;; - esac - done - - rm -rf ${absolutePath} - # specify the custom helper pod yaml + rm -rf "$VOL_DIR" helperPod: |- apiVersion: v1 kind: Pod diff --git a/deploy/example-config.yaml b/deploy/example-config.yaml index 417cfb7f8..49699f4bb 100644 --- a/deploy/example-config.yaml +++ b/deploy/example-config.yaml @@ -23,41 +23,11 @@ data: } setup: |- #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - absolutePath=$OPTARG - ;; - s) - sizeInBytes=$OPTARG - ;; - m) - volMode=$OPTARG - ;; - esac - done - - mkdir -m 0777 -p ${absolutePath} + mkdir -m 0777 -p "$VOL_DIR" teardown: |- #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - absolutePath=$OPTARG - ;; - s) - sizeInBytes=$OPTARG - ;; - m) - volMode=$OPTARG - ;; - esac - done - - rm -rf ${absolutePath} - helperPod.yaml: |- + rm -rf "$VOL_DIR" + helper-pod.yaml: |- apiVersion: v1 kind: Pod metadata: @@ -66,5 +36,3 @@ data: containers: - name: helper-pod image: busybox - - diff --git a/deploy/local-path-storage.yaml b/deploy/local-path-storage.yaml index dc174ad07..30d7a21bd 100644 --- a/deploy/local-path-storage.yaml +++ b/deploy/local-path-storage.yaml @@ -104,41 +104,11 @@ data: } setup: |- #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - 
absolutePath=$OPTARG
-                ;;
-            s)
-                sizeInBytes=$OPTARG
-                ;;
-            m)
-                volMode=$OPTARG
-                ;;
-        esac
-    done
-
-    mkdir -m 0777 -p ${absolutePath}
+    mkdir -m 0777 -p "$VOL_DIR"
   teardown: |-
     #!/bin/sh
-    while getopts "m:s:p:" opt
-    do
-        case $opt in
-            p)
-                absolutePath=$OPTARG
-                ;;
-            s)
-                sizeInBytes=$OPTARG
-                ;;
-            m)
-                volMode=$OPTARG
-                ;;
-        esac
-    done
-
-    rm -rf ${absolutePath}
-  helperPod.yaml: |-
+    rm -rf "$VOL_DIR"
+  helper-pod.yaml: |-
     apiVersion: v1
     kind: Pod
     metadata:
diff --git a/examples/cache/README.md b/examples/cache/README.md
new file mode 100644
index 000000000..868965d8d
--- /dev/null
+++ b/examples/cache/README.md
@@ -0,0 +1,56 @@
+# Example cache provisioner
+
+This example shows how to use short-lived PersistentVolumes for caching.
+A [buildah](https://github.com/containers/buildah)-based helper Pod provisions a PersistentVolume from a container image's file system and commits it back to an image when deprovisioning.
+Users select the desired cache, i.e. the image, using a PersistentVolumeClaim annotation that is passed through to the helper Pod as an environment variable.
+
+While it is not part of this example, caches could also be synchronized across nodes using an image registry.
+The [cache-provisioner](https://github.com/mgoltzsche/cache-provisioner) project aims to achieve this as well as other cache management features.
+
+## Test
+
+### Test the helper Pod separately
+
+The helper Pod can be tested separately, locally using Docker:
+```sh
+./helper-test.sh
+```
+
+### Test the integration
+
+_Please note that a non-overlayfs storage directory (`/data/example-cache-storage`) must be configured._
+_The provided configuration is known to work with minikube (`minikube start`) and kind (`kind create cluster; kind export kubeconfig`)._
+
+Install the example kustomization:
+```sh
+kustomize build . | kubectl apply -f -
+```
+
+If you want to test changes to the `local-path-provisioner` binary locally:
+```sh
+kubectl delete -n example-cache-storage deploy example-cache-local-path-provisioner
+(
+  cd ../..
+  go build .
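+  # Note: the flags below mirror the env vars set by provisioner-patch.yaml;
+  # --pvc-annotation exposes storage.example.org/* PVC annotations to the
+  # helper Pod as PVC_ANNOTATION_* env vars (used by config/setup).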
+  ./local-path-provisioner --debug start \
+    --namespace=example-cache-storage \
+    --configmap-name=example-cache-local-path-config \
+    --service-account-name=example-cache-local-path-provisioner-service-account \
+    --provisioner-name=storage.example.org/cache \
+    --pvc-annotation=storage.example.org \
+    --pvc-annotation-required=storage.example.org/cache-name
+)
+```
+
+Within another terminal, create an example Pod and PVC that pulls and runs a container image using [podman](https://github.com/containers/podman):
+```sh
+kubectl apply -f test-pod.yaml
+kubectl logs -f cached-build
+```
+
+If the Pod and PVC are removed and recreated, you can observe that, during the 2nd Pod execution on the same node, the image for the nested container doesn't need to be pulled again since it is cached:
+```sh
+kubectl delete -f test-pod.yaml
+kubectl apply -f test-pod.yaml
+kubectl logs -f cached-build
+```
diff --git a/examples/cache/config/config.json b/examples/cache/config/config.json
new file mode 100644
index 000000000..9efd953ca
--- /dev/null
+++ b/examples/cache/config/config.json
@@ -0,0 +1,16 @@
+{
+  "nodePathMap": [
+    {
+      "node": "DEFAULT_PATH_FOR_NON_LISTED_NODES",
+      "paths": ["/data/example-cache-storage"]
+    },
+    {
+      "node": "minikube",
+      "paths": ["/data/example-cache-storage"]
+    },
+    {
+      "node": "kind-control-plane",
+      "paths": ["/var/opt/example-cache-storage"]
+    }
+  ]
+}
diff --git a/examples/cache/config/helper-pod.yaml b/examples/cache/config/helper-pod.yaml
new file mode 100644
index 000000000..f12ed5395
--- /dev/null
+++ b/examples/cache/config/helper-pod.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: helper-pod
+spec:
+  hostPID: true
+  containers:
+  - name: helper
+    image: quay.io/buildah/stable:v1.18.0
+    imagePullPolicy: IfNotPresent
+    securityContext:
+      privileged: true
+    volumeMounts:
+    - name: data
+      mountPropagation: Bidirectional
diff --git a/examples/cache/config/setup b/examples/cache/config/setup
new file mode 100644
index 000000000..d9b0edf94
--- /dev/null
+++ b/examples/cache/config/setup
@@ -0,0 +1,76 @@
+#!/bin/sh
+
+set -eu
+
+MOUNT_NAME="$(basename "$VOL_DIR")"
+CACHE_DIR="$(dirname "$VOL_DIR")/.cache"
+CACHE_NAME="${PVC_ANNOTATION_CACHE_NAME:-$(echo "$PVC_NAME" | sed -E 's/^(.+)-[^-]+$/\1/')}"
+CACHE_IMAGE="cache/$CACHE_NAME"
+
+# Args: NAME VALUE
+validate() {
+	PATTERN='^[-_a-z0-9]+$'
+	echo "$2" | grep -Eq "$PATTERN" \
+		|| (echo "invalid $1 argument provided: $2 (must match $PATTERN)" >&2; false)
+}
+
+buildah() {
+	/usr/bin/buildah \
+		--root="$CACHE_DIR/containers/storage" \
+		--storage-driver=overlay \
+		"$@"
+}
+
+# Mounts a volume directory based on the latest CACHE_NAME image.
+mountCache() {
+	echo "Creating volume $VOL_DIR from cache '$CACHE_NAME'" >&2
+	mkdir -m 0777 "$VOL_DIR" || exit 2
+	(
+		# Create new volume from cache's latest container image
+		# (The latest cache image could be pulled from a registry here)
+		(buildah from --pull-never --name "$MOUNT_NAME" "$CACHE_IMAGE" \
+			|| ([ $? -eq 125 ] && (
+				buildah delete "$MOUNT_NAME"
+				buildah from --name "$MOUNT_NAME" scratch
+			))) >/dev/null &&
+		CONTAINERDIR="$(buildah mount "$MOUNT_NAME")" &&
+		mount -o bind,rshared "$CONTAINERDIR" "$VOL_DIR" &&
+		chmod 0777 "$VOL_DIR"
+	) || (
+		umount "$VOL_DIR" 2>/dev/null 1>&2
+		buildah umount "$MOUNT_NAME" 2>/dev/null 1>&2
+		buildah delete "$MOUNT_NAME" 2>/dev/null 1>&2
+		rm -rf "$VOL_DIR"
+		false
+	)
+	echo "$VOL_DIR"
+}
+
+# Unmounts a cache volume directory, commits it and tags the result as the latest image for the given CACHE_NAME.
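+# The committed image preserves the volume's final state, so the next volume
+# created for the same CACHE_NAME on this node starts from it.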
+umountCache() { + # Commit volume only if dir is mounted (node restart results in unmounted volumes). + if mountpoint -q "$VOL_DIR"; then + echo "Committing volume $VOL_DIR to cache '$CACHE_NAME'" >&2 + IMGID="$(buildah commit -q --timestamp 1 "$MOUNT_NAME")" && + buildah tag "$IMGID" "$CACHE_IMAGE" && + # The latest cache image could be pushed to a registry here + umount "$VOL_DIR" + fi + + # Delete volume / container + echo "Deleting volume $VOL_DIR" >&2 + buildah umount "$MOUNT_NAME" >/dev/null || true + buildah delete "$MOUNT_NAME" >/dev/null || true + rm -rf "$VOL_DIR" || (printf 'error: volume deletion blocked by mount: '; grep $MOUNT_NAME /etc/mtab; false) >&2 +} + + +mkdir -p "$CACHE_DIR/containers/storage" +validate CACHE_NAME "$CACHE_NAME" +validate MOUNT_NAME "$MOUNT_NAME" + +if [ "${1:-}" = teardown ]; then + umountCache +else + mountCache +fi diff --git a/examples/cache/config/teardown b/examples/cache/config/teardown new file mode 100644 index 000000000..2ba9aafb6 --- /dev/null +++ b/examples/cache/config/teardown @@ -0,0 +1,3 @@ +#!/bin/sh + +sh /script/setup teardown diff --git a/examples/cache/helper-test.sh b/examples/cache/helper-test.sh new file mode 100755 index 000000000..f1d6d3adc --- /dev/null +++ b/examples/cache/helper-test.sh @@ -0,0 +1,63 @@ +#!/bin/sh + +cd "$(dirname "$0")" + +VOLNAME=pv-xyz1_default_build-cache +EXPECTED_CONTENT="testcontent $(date)" + +# ARGS: SCRIPTNAME +runScript() { + mkdir -p testmount + docker run --rm --privileged \ + -e VOL_DIR=/data/$VOLNAME \ + -e VOL_NAME=pv-xyz \ + -e VOL_SIZE_BYTES=12345678 \ + -e PVC_NAME=pvc-xyz \ + -e PVC_NAMESPACE=test-namespace \ + -e PVC_ANNOTATION_CACHE_NAME=mycache \ + --mount "type=bind,source=`pwd`/config,target=/script" \ + --mount "type=bind,source=`pwd`/testmount,target=/data,bind-propagation=rshared" \ + --entrypoint=/bin/sh \ + quay.io/buildah/stable:v1.18.0 \ + /script/$1 +} + +set -e + +mkdir -p testmount +rm -rf testmount/$VOLNAME + +echo +echo TEST setup +echo +( + set -ex + runScript setup + + echo "$EXPECTED_CONTENT" > testmount/$VOLNAME/testfile +) + +echo +echo TEST teardown +echo +( + set -ex + runScript teardown + + [ ! 
-d testmount/$VOLNAME ] || (echo fail: volume should be removed >&2; false) +) + +echo +echo TEST restore +echo +( + set -ex + VOLNAME=pv-xyz2_default_build-cache + + runScript setup + + CONTENT="$(cat testmount/$VOLNAME/testfile)" + [ "$CONTENT" = "$EXPECTED_CONTENT" ] || (echo fail: volume should return what was last written into that cache key >&2; false) + + runScript teardown +) diff --git a/examples/cache/kustomization.yaml b/examples/cache/kustomization.yaml new file mode 100644 index 000000000..0ec2e0698 --- /dev/null +++ b/examples/cache/kustomization.yaml @@ -0,0 +1,40 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: example-cache-storage +namePrefix: example-cache- + +commonLabels: + app: example-cache-provisioner + +resources: +- ../../deploy + +patchesStrategicMerge: +- provisioner-patch.yaml + +patchesJson6902: +- target: + version: v1 + kind: Namespace + name: local-path-storage + path: namespace-patch.yaml +- target: + group: storage.k8s.io + version: v1 + kind: StorageClass + name: local-path + path: storageclass-patch.yaml + +configMapGenerator: +- name: local-path-config + namespace: local-path-storage + behavior: merge + files: + - config/config.json + - config/helper-pod.yaml + - config/setup + - config/teardown + +generatorOptions: + disableNameSuffixHash: true diff --git a/examples/cache/namespace-patch.yaml b/examples/cache/namespace-patch.yaml new file mode 100644 index 000000000..15201d730 --- /dev/null +++ b/examples/cache/namespace-patch.yaml @@ -0,0 +1,3 @@ +- op: replace + path: /metadata/name + value: example-cache-storage diff --git a/examples/cache/provisioner-patch.yaml b/examples/cache/provisioner-patch.yaml new file mode 100644 index 000000000..600b16785 --- /dev/null +++ b/examples/cache/provisioner-patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: local-path-provisioner + namespace: local-path-storage +spec: + template: + spec: + containers: + - name: local-path-provisioner + env: + - name: CONFIGMAP_NAME + value: example-cache-local-path-config + - name: SERVICE_ACCOUNT_NAME + value: example-cache-local-path-provisioner-service-account + - name: PROVISIONER_NAME + value: storage.example.org/cache + - name: PVC_ANNOTATION + value: storage.example.org + - name: PVC_ANNOTATION_REQUIRED + value: storage.example.org/cache-name + - name: HELPER_POD_TIMEOUT + value: "2m" diff --git a/examples/cache/storageclass-patch.yaml b/examples/cache/storageclass-patch.yaml new file mode 100644 index 000000000..f1436c896 --- /dev/null +++ b/examples/cache/storageclass-patch.yaml @@ -0,0 +1,6 @@ +- op: replace + path: /metadata/name + value: example-cache +- op: replace + path: /provisioner + value: storage.example.org/cache diff --git a/examples/cache/test-pod.yaml b/examples/cache/test-pod.yaml new file mode 100644 index 000000000..4391cc017 --- /dev/null +++ b/examples/cache/test-pod.yaml @@ -0,0 +1,51 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: cached-build +spec: + restartPolicy: Never + securityContext: + runAsUser: 9000 + runAsGroup: 9000 + fsGroup: 9000 + containers: + - name: build + image: mgoltzsche/podman:2.2.1 + command: ["/bin/sh"] + args: + - -c + - | + set -ex + mktemp -d -p $HOME + ([ -f $HOME/date ] || date > $HOME/date) && cat $HOME/date + OOMSCORE=$(cat /proc/self/oom_score_adj) + podman run --name build --rm --oom-score-adj "$OOMSCORE" alpine:3.12 echo hello from nested container + env: + - name: HOME + value: /podman + securityContext: + privileged: true + volumeMounts: 
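+    # The PVC-backed volume is mounted at $HOME (set via the HOME env var
+    # above) so podman's image store, kept under the home directory by
+    # default, persists across Pods.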
+    # simply cache home directory
+    - mountPath: /podman
+      name: cache
+  volumes:
+  - name: cache
+    persistentVolumeClaim:
+      claimName: build-cache
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: build-cache
+  annotations:
+    storage.example.org/cache-name: example-project
+spec:
+  accessModes:
+  - ReadWriteOnce
+  volumeMode: Filesystem
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: example-cache
diff --git a/examples/quota/README.md b/examples/quota/README.md
index 49d61569d..25f1b2264 100644
--- a/examples/quota/README.md
+++ b/examples/quota/README.md
@@ -2,8 +2,8 @@ this is an example to enable quota for xfs
 
 # Usage
-> 1. build a helper image using the sample dockerfile to replace helper image xxx/storage-xfs-quota:v0.1 at configmap(helperPod.yaml) of debug.yaml.
-> 2. use the sample setup and teardown script at configmap of debug.yaml
+> 1. build a helper image using the sample Dockerfile and use it to replace the helper image xxx/storage-xfs-quota:v0.1 referenced in the ConfigMap (config/helper-pod.yaml).
+> 2. use the sample setup and teardown scripts contained within the kustomization.
 
 Notice:
 > 1. make sure the path at nodePathMap is the mountpoint of xfs which enables pquota
@@ -13,6 +13,7 @@ Notice:
 > git clone https://github.com/rancher/local-path-provisioner.git
 > cd local-path-provisioner
 > go build
-> kubectl apply -f debug.yaml
+> kustomize build examples/quota | kubectl apply -f -
+> kubectl delete -n local-path-storage deploy local-path-provisioner
 > ./local-path-provisioner --debug start --namespace=local-path-storage
 ```
diff --git a/examples/quota/helper-pod.yaml b/examples/quota/config/helper-pod.yaml
similarity index 100%
rename from examples/quota/helper-pod.yaml
rename to examples/quota/config/helper-pod.yaml
diff --git a/examples/quota/setup b/examples/quota/config/setup
similarity index 57%
rename from examples/quota/setup
rename to examples/quota/config/setup
index 958a308fb..b22486c93 100755
--- a/examples/quota/setup
+++ b/examples/quota/config/setup
@@ -1,23 +1,10 @@
 #!/bin/sh
-while getopts "m:s:p:" opt
-do
-    case $opt in
-        p)
-        absolutePath=$OPTARG
-        ;;
-        s)
-        sizeInBytes=$OPTARG
-        ;;
-        m)
-        volMode=$OPTARG
-        ;;
-    esac
-done
-
-xfsPath=$(dirname "$absolutePath")
-pvcName=$(basename "$absolutePath")
-mkdir -p ${absolutePath}
+xfsPath=$(dirname "$VOL_DIR")
+pvcName=$(basename "$VOL_DIR")
+sizeInBytes=$VOL_SIZE_BYTES
+
+mkdir -p "$VOL_DIR"
 
 # support xfs quota
 type=`stat -f -c %T ${xfsPath}`
@@ -34,10 +21,10 @@ if [ ${type} == 'xfs' ]; then
         id=$[${id}+1]
     fi
 
-    echo "${id}:${absolutePath}" >> /etc/projects
+    echo "${id}:${VOL_DIR}" >> /etc/projects
     echo "${pvcName}:${id}" >> /etc/projid
 
     xfs_quota -x -c "project -s ${pvcName}"
     xfs_quota -x -c "limit -p bhard=${sizeInBytes} ${pvcName}" ${xfsPath}
     xfs_quota -x -c "report -pbih" ${xfsPath}
-fi
\ No newline at end of file
+fi
diff --git a/examples/quota/teardown b/examples/quota/config/teardown
similarity index 56%
rename from examples/quota/teardown
rename to examples/quota/config/teardown
index 3e9268dbb..67dbad850 100755
--- a/examples/quota/teardown
+++ b/examples/quota/config/teardown
@@ -1,22 +1,7 @@
 #!/bin/sh
-while getopts "m:s:p:" opt
-do
-    case $opt in
-        p)
-        absolutePath=$OPTARG
-        ;;
-        s)
-        sizeInBytes=$OPTARG
-        ;;
-        m)
-        volMode=$OPTARG
-        ;;
-    esac
-done
-
-xfsPath=$(dirname "$absolutePath")
-pvcName=$(basename "$absolutePath")
+xfsPath=$(dirname "$VOL_DIR")
+pvcName=$(basename "$VOL_DIR")
 
 # support xfs quota
 type=`stat -f -c %T ${xfsPath}`
@@ -26,10 +11,10 @@ if [ ${type} == 'xfs' ]; then
 
     echo "support xfs quota"
    xfs_quota -x -c 
"limit -p bhard=0 ${pvcName}" ${xfsPath} fi -rm -rf ${absolutePath} +rm -rf "$VOL_DIR" + if [ ${type} == 'xfs' ]; then echo "$(sed "/${pvcName}/d" /etc/projects)" > /etc/projects echo "$(sed "/${pvcName}/d" /etc/projid)" > /etc/projid xfs_quota -x -c "report -pbih" ${xfsPath} fi - diff --git a/examples/quota/debug.yaml b/examples/quota/debug.yaml deleted file mode 100644 index a6f8ec62f..000000000 --- a/examples/quota/debug.yaml +++ /dev/null @@ -1,141 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: local-path-storage ---- - -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: local-path -provisioner: rancher.io/local-path -volumeBindingMode: WaitForFirstConsumer -reclaimPolicy: Delete - ---- - -kind: ConfigMap -apiVersion: v1 -metadata: - name: local-path-config - namespace: local-path-storage -data: - config.json: |- - { - "nodePathMap":[ - { - "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES", - "paths":["/opt/local-path-provisioner"] - }, - { - "node":"yasker-lp-dev1", - "paths":["/opt/local-path-provisioner", "/data1"] - }, - { - "node":"yasker-lp-dev3", - "paths":[] - } - ] - } - setup: |- - #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - absolutePath=$OPTARG - ;; - s) - sizeInBytes=$OPTARG - ;; - m) - volMode=$OPTARG - ;; - esac - done - - xfsPath=$(dirname "$absolutePath") - pvcName=$(basename "$absolutePath") - mkdir -p ${absolutePath} - - # support xfs quota - type=`stat -f -c %T ${xfsPath}` - if [ ${type} == 'xfs' ]; then - - echo "support xfs quota" - - project=`cat /etc/projects | tail -n 1` - id=`echo ${project%:*}` - - if [ ! ${project} ]; then - id=1 - else - id=$[${id}+1] - fi - - echo "${id}:${absolutePath}" >> /etc/projects - echo "${pvcName}:${id}" >> /etc/projid - - xfs_quota -x -c "project -s ${pvcName}" - xfs_quota -x -c "limit -p bhard=${sizeInBytes} ${pvcName}" ${xfsPath} - xfs_quota -x -c "report -pbih" ${xfsPath} - fi - - teardown: |- - #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - absolutePath=$OPTARG - ;; - s) - sizeInBytes=$OPTARG - ;; - m) - volMode=$OPTARG - ;; - esac - done - - xfsPath=$(dirname "$absolutePath") - pvcName=$(basename "$absolutePath") - - # support xfs quota - type=`stat -f -c %T ${xfsPath}` - if [ ${type} == 'xfs' ]; then - - echo "support xfs quota" - xfs_quota -x -c "limit -p bhard=0 ${pvcName}" ${xfsPath} - fi - - rm -rf ${absolutePath} - if [ ${type} == 'xfs' ]; then - echo "$(sed "/${pvcName}/d" /etc/projects)" > /etc/projects - echo "$(sed "/${pvcName}/d" /etc/projid)" > /etc/projid - xfs_quota -x -c "report -pbih" ${xfsPath} - fi - - helperPod.yaml: |- - apiVersion: v1 - kind: Pod - metadata: - name: helper-pod - spec: - containers: - - name: helper-pod - image: xxx/storage-xfs-quota:v0.1 - imagePullPolicy: Always - securityContext: - privileged: true - volumeMounts: - - name: xfs-quota-projects - subPath: projects - mountPath: /etc/projects - - name: xfs-quota-projects - subPath: projid - mountPath: /etc/projid - volumes: - - name: xfs-quota-projects - hostPath: - path: /etc diff --git a/examples/quota/kustomization.yaml b/examples/quota/kustomization.yaml new file mode 100644 index 000000000..541ac1542 --- /dev/null +++ b/examples/quota/kustomization.yaml @@ -0,0 +1,17 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +- ../../deploy + +configMapGenerator: +- name: local-path-config + namespace: local-path-storage + behavior: merge + files: + - config/helper-pod.yaml + - config/setup + - config/teardown + +generatorOptions: + 
disableNameSuffixHash: true diff --git a/main.go b/main.go index 700fdc00f..a9924a905 100644 --- a/main.go +++ b/main.go @@ -5,7 +5,9 @@ import ( "os" "os/signal" "path/filepath" + "strings" "syscall" + "time" "github.com/Sirupsen/logrus" "github.com/pkg/errors" @@ -19,26 +21,34 @@ import ( ) var ( - VERSION = "0.0.1" - FlagConfigFile = "config" - FlagProvisionerName = "provisioner-name" - EnvProvisionerName = "PROVISIONER_NAME" - DefaultProvisionerName = "rancher.io/local-path" - FlagNamespace = "namespace" - EnvNamespace = "POD_NAMESPACE" - DefaultNamespace = "local-path-storage" - FlagHelperImage = "helper-image" - EnvHelperImage = "HELPER_IMAGE" - DefaultHelperImage = "rancher/library-busybox:1.31.1" - FlagServiceAccountName = "service-account-name" - DefaultServiceAccount = "local-path-provisioner-service-account" - EnvServiceAccountName = "SERVICE_ACCOUNT_NAME" - FlagKubeconfig = "kubeconfig" - DefaultConfigFileKey = "config.json" - DefaultConfigMapName = "local-path-config" - FlagConfigMapName = "configmap-name" - FlagHelperPodFile = "helper-pod-file" - DefaultHelperPodFile = "helperPod.yaml" + VERSION = "0.0.1" + FlagConfigFile = "config" + FlagProvisionerName = "provisioner-name" + EnvProvisionerName = "PROVISIONER_NAME" + DefaultProvisionerName = "rancher.io/local-path" + FlagNamespace = "namespace" + EnvNamespace = "POD_NAMESPACE" + DefaultNamespace = "local-path-storage" + FlagHelperImage = "helper-image" + EnvHelperImage = "HELPER_IMAGE" + DefaultHelperImage = "rancher/library-busybox:1.31.1" + FlagServiceAccountName = "service-account-name" + DefaultServiceAccount = "local-path-provisioner-service-account" + EnvServiceAccountName = "SERVICE_ACCOUNT_NAME" + FlagKubeconfig = "kubeconfig" + DefaultConfigFileKey = "config.json" + DefaultConfigMapName = "local-path-config" + FlagConfigMapName = "configmap-name" + EnvConfigMapName = "CONFIGMAP_NAME" + FlagHelperPodFile = "helper-pod-file" + DefaultHelperPodFile = "helper-pod.yaml" + EnvHelperPodFile = "HELPER_POD_FILE" + FlagHelperPodTimeout = "helper-pod-timeout" + EnvHelperPodTimeout = "HELPER_POD_TIMEOUT" + FlagPVCAnnotation = "pvc-annotation" + EnvPVCAnnotation = "PVC_ANNOTATION" + FlagPVCAnnotationRequired = "pvc-annotation-required" + EnvPVCAnnotationRequired = "PVC_ANNOTATION_REQUIRED" ) func cmdNotFound(c *cli.Context, command string) { @@ -50,12 +60,15 @@ func onUsageError(c *cli.Context, err error, isSubcommand bool) error { } func RegisterShutdownChannel(done chan struct{}) { - sigs := make(chan os.Signal, 1) + sigs := make(chan os.Signal, 2) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) go func() { sig := <-sigs - logrus.Infof("Receive %v to exit", sig) + logrus.Infof("Received %v signal - terminating", sig) close(done) + <-sigs + logrus.Info("Received 2nd termination signal - exiting forcefully") + os.Exit(254) }() } @@ -80,21 +93,16 @@ func StartCmd() cli.Command { EnvVar: EnvNamespace, Value: DefaultNamespace, }, - cli.StringFlag{ - Name: FlagHelperImage, - Usage: "Required. The helper image used for create/delete directories on the host", - EnvVar: EnvHelperImage, - Value: DefaultHelperImage, - }, cli.StringFlag{ Name: FlagKubeconfig, Usage: "Paths to a kubeconfig. Only required when it is out-of-cluster.", Value: "", }, cli.StringFlag{ - Name: FlagConfigMapName, - Usage: "Required. Specify configmap name.", - Value: DefaultConfigMapName, + Name: FlagConfigMapName, + Usage: "Required. 
Specify configmap name.",
+			EnvVar: EnvConfigMapName,
+			Value:  DefaultConfigMapName,
 		},
 		cli.StringFlag{
 			Name:   FlagServiceAccountName,
@@ -103,9 +111,28 @@ func StartCmd() cli.Command {
 			Value:  DefaultServiceAccount,
 		},
 		cli.StringFlag{
-			Name:  FlagHelperPodFile,
-			Usage: "Paths to the Helper pod yaml file",
-			Value: "",
+			Name:   FlagHelperPodFile,
+			Usage:  "Path to the helper pod yaml file",
+			EnvVar: EnvHelperPodFile,
+			Value:  "",
+		},
+		cli.StringFlag{
+			Name:   FlagHelperPodTimeout,
+			Usage:  "Helper pod command execution timeout",
+			EnvVar: EnvHelperPodTimeout,
+			Value:  "2m",
+		},
+		cli.StringFlag{
+			Name:   FlagPVCAnnotation,
+			Usage:  "A PersistentVolumeClaim annotation or prefix passed through to the helper pod as env vars (PVC_ANNOTATION[_<SUFFIX>]=<value>)",
+			EnvVar: EnvPVCAnnotation,
+			Value:  "",
+		},
+		cli.StringFlag{
+			Name:   FlagPVCAnnotationRequired,
+			Usage:  "Annotation that must be specified on PersistentVolumeClaims (multiple comma-separated)",
+			EnvVar: EnvPVCAnnotationRequired,
+			Value:  "",
 		},
 	},
 	Action: func(c *cli.Context) {
@@ -196,17 +223,19 @@ func startDaemon(c *cli.Context) error {
 			return fmt.Errorf("invalid empty flag %v and it also does not exist at ConfigMap %v/%v with err: %v", FlagConfigFile, namespace, configMapName, err)
 		}
 	}
-	helperImage := c.String(FlagHelperImage)
-	if helperImage == "" {
-		return fmt.Errorf("invalid empty flag %v", FlagHelperImage)
-	}
-
 	serviceAccountName := c.String(FlagServiceAccountName)
 	if serviceAccountName == "" {
 		return fmt.Errorf("invalid empty flag %v", FlagServiceAccountName)
 	}
 
-	// if helper pod file is not specified, then find the helper pod by configmap with key = helperPod.yaml
+	pvcAnnotation := c.String(FlagPVCAnnotation)
+	pvcAnnotationsRequired := c.String(FlagPVCAnnotationRequired)
+	var requiredPVCAnnotations []string
+	if pvcAnnotationsRequired != "" {
+		requiredPVCAnnotations = strings.Split(pvcAnnotationsRequired, ",")
+	}
+
+	// if helper pod file is not specified, then find the helper pod by configmap with key = helper-pod.yaml
 	// if helper pod file is specified with flag FlagHelperPodFile, then load the file
 	helperPodFile := c.String(FlagHelperPodFile)
 	helperPodYaml := ""
@@ -221,8 +250,13 @@ func startDaemon(c *cli.Context) error {
 			return fmt.Errorf("could not open file %v with err: %v", helperPodFile, err)
 		}
 	}
+	helperPodTimeoutStr := c.String(FlagHelperPodTimeout)
+	helperPodTimeout, err := time.ParseDuration(helperPodTimeoutStr)
+	if err != nil {
+		return fmt.Errorf("invalid %s option provided: %s", FlagHelperPodTimeout, err)
+	}
 
-	provisioner, err := NewProvisioner(stopCh, kubeClient, configFile, namespace, helperImage, configMapName, serviceAccountName, helperPodYaml)
+	provisioner, err := NewProvisioner(stopCh, kubeClient, configFile, namespace, configMapName, serviceAccountName, helperPodYaml, helperPodTimeout, pvcAnnotation, requiredPVCAnnotations)
 	if err != nil {
 		return err
 	}
diff --git a/provisioner.go b/provisioner.go
index 84f8f802f..c8451ab97 100644
--- a/provisioner.go
+++ b/provisioner.go
@@ -6,6 +6,8 @@ import (
 	"os"
 	"path/filepath"
 	"reflect"
+	"regexp"
+	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -31,29 +33,43 @@ const (
 	KeyNode = "kubernetes.io/hostname"
 
 	NodeDefaultNonListedNodes = "DEFAULT_PATH_FOR_NON_LISTED_NODES"
+
+	annotationPVCName      = "local-path-provisioner.rancher.io/pvc-name"
+	annotationPVCNamespace = "local-path-provisioner.rancher.io/pvc-namespace"
+
+	envVolDir  = "VOL_DIR"
+	envVolName = "VOL_NAME"
+	envVolType = "VOL_TYPE"
+	envVolSize = "VOL_SIZE_BYTES"
+	envPVCName = "PVC_NAME"
+	envPVCNamespace = 
"PVC_NAMESPACE" + envPVCAnnotation = "PVC_ANNOTATION" + helperScriptDir = "/script" + helperDataVolName = "data" + helperScriptVolName = "script" ) var ( - CmdTimeoutCounts = 120 - ConfigFileCheckInterval = 30 * time.Second HelperPodNameMaxLength = 128 + + invNameRegex = regexp.MustCompile("[^a-zA-Z0-9]+") ) type LocalPathProvisioner struct { - stopCh chan struct{} - kubeClient *clientset.Clientset - namespace string - helperImage string - serviceAccountName string - - config *Config - configData *ConfigData - configFile string - configMapName string - configMutex *sync.RWMutex - helperPod *v1.Pod + kubeClient *clientset.Clientset + namespace string + serviceAccountName string + pvcAnnotation string + pvcAnnotationsRequired []string + + config *Config + configData *ConfigData + configFile string + configMapName string + configMutex *sync.RWMutex + helperPod *v1.Pod + helperPodTimeout time.Duration } type NodePathMapData struct { @@ -74,14 +90,14 @@ type Config struct { } func NewProvisioner(stopCh chan struct{}, kubeClient *clientset.Clientset, - configFile, namespace, helperImage, configMapName, serviceAccountName, helperPodYaml string) (*LocalPathProvisioner, error) { + configFile, namespace, configMapName, serviceAccountName, helperPodYaml string, helperPodTimeout time.Duration, pvcAnnotation string, pvcAnnotationsRequired []string) (*LocalPathProvisioner, error) { p := &LocalPathProvisioner{ - stopCh: stopCh, - - kubeClient: kubeClient, - namespace: namespace, - helperImage: helperImage, - serviceAccountName: serviceAccountName, + kubeClient: kubeClient, + namespace: namespace, + serviceAccountName: serviceAccountName, + pvcAnnotation: pvcAnnotation, + pvcAnnotationsRequired: pvcAnnotationsRequired, + helperPodTimeout: helperPodTimeout, // config will be updated shortly by p.refreshConfig() config: nil, @@ -98,7 +114,7 @@ func NewProvisioner(stopCh chan struct{}, kubeClient *clientset.Clientset, if err := p.refreshConfig(); err != nil { return nil, err } - p.watchAndRefreshConfig() + p.watchAndRefreshConfig(stopCh) return p, nil } @@ -131,7 +147,7 @@ func (p *LocalPathProvisioner) refreshConfig() error { return err } -func (p *LocalPathProvisioner) watchAndRefreshConfig() { +func (p *LocalPathProvisioner) watchAndRefreshConfig(done chan struct{}) { go func() { ticker := time.NewTicker(ConfigFileCheckInterval) defer ticker.Stop() @@ -141,7 +157,7 @@ func (p *LocalPathProvisioner) watchAndRefreshConfig() { if err := p.refreshConfig(); err != nil { logrus.Errorf("failed to load the new config file: %v", err) } - case <-p.stopCh: + case <-done: logrus.Infof("stop watching config file") return } @@ -191,6 +207,11 @@ func (p *LocalPathProvisioner) Provision(opts pvController.ProvisionOptions) (*v if opts.SelectedNode == nil { return nil, fmt.Errorf("configuration error, no node was specified") } + for _, a := range p.pvcAnnotationsRequired { + if pvc.Annotations == nil || pvc.Annotations[a] == "" { + return nil, fmt.Errorf("PVC does not specify required annotation %q", a) + } + } basePath, err := p.getRandomPathOnNode(node.Name) if err != nil { @@ -204,13 +225,20 @@ func (p *LocalPathProvisioner) Provision(opts pvController.ProvisionOptions) (*v logrus.Infof("Creating volume %v at %v:%v", name, node.Name, path) storage := pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - volMode := string(*pvc.Spec.VolumeMode) - - createCmdsForPath := []string{ - "/bin/sh", - "/script/setup", - } - if err := p.createHelperPod(ActionTypeCreate, createCmdsForPath, name, path, node.Name, 
volMode, storage.Value()); err != nil { + env, annotations := p.annotationsToEnv(pvc.Annotations) + annotations[annotationPVCName] = pvc.Name + annotations[annotationPVCNamespace] = pvc.Namespace + provisionCmd := []string{"/bin/sh", "/script/setup"} + if err := p.createHelperPod(ActionTypeCreate, provisionCmd, volumeOptions{ + Name: name, + Path: path, + Mode: *pvc.Spec.VolumeMode, + SizeInBytes: storage.Value(), + Node: node.Name, + PVCName: pvc.Name, + PVCNamespace: pvc.Namespace, + Env: env, + }); err != nil { return nil, err } @@ -218,7 +246,8 @@ func (p *LocalPathProvisioner) Provision(opts pvController.ProvisionOptions) (*v hostPathType := v1.HostPathDirectoryOrCreate return &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: name, + Annotations: annotations, }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeReclaimPolicy: *opts.StorageClass.ReclaimPolicy, @@ -262,12 +291,21 @@ func (p *LocalPathProvisioner) Delete(pv *v1.PersistentVolume) (err error) { if err != nil { return err } + env, _ := p.annotationsToEnv(pv.Annotations) if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimRetain { logrus.Infof("Deleting volume %v at %v:%v", pv.Name, node, path) storage := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)] - volMode := string(*pv.Spec.VolumeMode) - cleanupCmdsForPath := []string{"/bin/sh", "/script/teardown"} - if err := p.createHelperPod(ActionTypeDelete, cleanupCmdsForPath, pv.Name, path, node, volMode, storage.Value()); err != nil { + cleanupCmd := []string{"/bin/sh", "/script/teardown"} + if err := p.createHelperPod(ActionTypeDelete, cleanupCmd, volumeOptions{ + Name: pv.Name, + Path: path, + Mode: *pv.Spec.VolumeMode, + SizeInBytes: storage.Value(), + Node: node, + PVCName: pv.Annotations[annotationPVCName], + PVCNamespace: pv.Annotations[annotationPVCNamespace], + Env: env, + }); err != nil { logrus.Infof("clean up volume %v failed: %v", pv.Name, err) return err } @@ -277,6 +315,34 @@ func (p *LocalPathProvisioner) Delete(pv *v1.PersistentVolume) (err error) { return nil } +func (p *LocalPathProvisioner) annotationsToEnv(annotations map[string]string) (env []v1.EnvVar, matched map[string]string) { + matched = map[string]string{} + if len(annotations) > 0 && p.pvcAnnotation != "" { + annotationPrefix := p.pvcAnnotation + "/" + annotationKeys := make([]string, 0, len(annotations)) + for k := range annotations { + annotationKeys = append(annotationKeys, k) + } + sort.Strings(annotationKeys) + for _, k := range annotationKeys { + v := annotations[k] + if k == p.pvcAnnotation { + matched[k] = v + env = append(env, v1.EnvVar{Name: envPVCAnnotation, Value: v}) + } else if strings.HasPrefix(k, annotationPrefix) { + path := k[len(annotationPrefix):] + envName := strings.ToUpper(invNameRegex.ReplaceAllString(path, "_")) + if envName != "" { + matched[k] = v + envName = fmt.Sprintf("%s_%s", envPVCAnnotation, envName) + env = append(env, v1.EnvVar{Name: envName, Value: v}) + } + } + } + } + return +} + func (p *LocalPathProvisioner) getPathAndNodeForPV(pv *v1.PersistentVolume) (path, node string, err error) { defer func() { err = errors.Wrapf(err, "failed to delete volume %v", pv.Name) @@ -318,29 +384,33 @@ func (p *LocalPathProvisioner) getPathAndNodeForPV(pv *v1.PersistentVolume) (pat return path, node, nil } -func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmdsForPath []string, name, path, node, volumeMode string, sizeInBytes int64) (err error) { +type volumeOptions struct { + Name string + Path string + Mode 
v1.PersistentVolumeMode + SizeInBytes int64 + Node string + PVCName string + PVCNamespace string + Env []v1.EnvVar +} + +func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmd []string, o volumeOptions) (err error) { defer func() { - err = errors.Wrapf(err, "failed to %v volume %v", action, name) + err = errors.Wrapf(err, "failed to %v volume %v", action, o.Name) }() - if name == "" || path == "" || node == "" { + if o.Name == "" || o.Path == "" || o.Node == "" { return fmt.Errorf("invalid empty name or path or node") } - path, err = filepath.Abs(path) - if err != nil { - return err - } - path = strings.TrimSuffix(path, "/") - parentDir, volumeDir := filepath.Split(path) - parentDir = strings.TrimSuffix(parentDir, "/") - volumeDir = strings.TrimSuffix(volumeDir, "/") - if parentDir == "" || volumeDir == "" { - // it covers the `/` case - return fmt.Errorf("invalid path %v for %v: cannot find parent dir or volume dir", action, path) + if !filepath.IsAbs(o.Path) { + return fmt.Errorf("volume path %s is not absolute", o.Path) } + o.Path = filepath.Clean(o.Path) + parentDir, volumeDir := filepath.Split(o.Path) hostPathType := v1.HostPathDirectoryOrCreate lpvVolumes := []v1.Volume{ { - Name: "data", + Name: helperDataVolName, VolumeSource: v1.VolumeSource{ HostPath: &v1.HostPathVolumeSource{ Path: parentDir, @@ -349,7 +419,7 @@ func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmdsForPath [] }, }, { - Name: "script", + Name: helperScriptVolName, VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{ @@ -369,18 +439,6 @@ func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmdsForPath [] }, }, } - lpvVolumeMounts := []v1.VolumeMount{ - { - Name: "data", - ReadOnly: false, - MountPath: parentDir, - }, - { - Name: "script", - ReadOnly: false, - MountPath: "/script", - }, - } lpvTolerations := []v1.Toleration{ { Operator: v1.TolerationOpExists, @@ -388,23 +446,43 @@ func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmdsForPath [] } helperPod := p.helperPod.DeepCopy() + scriptMount := addVolumeMount(&helperPod.Spec.Containers[0].VolumeMounts, helperScriptVolName, helperScriptDir) + scriptMount.MountPath = helperScriptDir + dataMount := addVolumeMount(&helperPod.Spec.Containers[0].VolumeMounts, helperDataVolName, parentDir) + parentDir = dataMount.MountPath + parentDir = strings.TrimSuffix(parentDir, string(filepath.Separator)) + volumeDir = strings.TrimSuffix(volumeDir, string(filepath.Separator)) + if parentDir == "" || volumeDir == "" { + // it covers the `/` case + return fmt.Errorf("invalid path %v for %v: cannot find parent dir or volume dir", action, o.Path) + } + volumeDir = filepath.Join(parentDir, volumeDir) + // Specify the helper pod's env vars. 
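+	// (e.g. VOL_DIR=/data/pv-xyz1_default_build-cache and VOL_SIZE_BYTES=12345678,
+	// mirroring the values exercised by examples/cache/helper-test.sh).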
+	// Vars with empty values are excluded to support default values within the pod template
+	env := filterEmptyEnvVars(append(o.Env,
+		v1.EnvVar{Name: envVolDir, Value: volumeDir},
+		v1.EnvVar{Name: envVolType, Value: string(o.Mode)},
+		v1.EnvVar{Name: envVolSize, Value: strconv.FormatInt(o.SizeInBytes, 10)},
+		v1.EnvVar{Name: envVolName, Value: o.Name},
+		v1.EnvVar{Name: envPVCName, Value: o.PVCName},
+		v1.EnvVar{Name: envPVCNamespace, Value: o.PVCNamespace}))
+
 	// use different name for helper pods
 	// https://github.com/rancher/local-path-provisioner/issues/154
-	helperPod.Name = (helperPod.Name + "-" + string(action) + "-" + name)
+	helperPod.Name = (helperPod.Name + "-" + string(action) + "-" + o.Name)
 	if len(helperPod.Name) > HelperPodNameMaxLength {
 		helperPod.Name = helperPod.Name[:HelperPodNameMaxLength]
 	}
 	helperPod.Namespace = p.namespace
-	helperPod.Spec.NodeName = node
-	helperPod.Spec.ServiceAccountName = p.serviceAccountName
+	helperPod.Spec.NodeName = o.Node
+	if helperPod.Spec.ServiceAccountName == "" {
+		helperPod.Spec.ServiceAccountName = p.serviceAccountName
+	}
 	helperPod.Spec.RestartPolicy = v1.RestartPolicyNever
 	helperPod.Spec.Tolerations = append(helperPod.Spec.Tolerations, lpvTolerations...)
 	helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, lpvVolumes...)
-	helperPod.Spec.Containers[0].VolumeMounts = append(helperPod.Spec.Containers[0].VolumeMounts, lpvVolumeMounts...)
-	helperPod.Spec.Containers[0].Command = cmdsForPath
-	helperPod.Spec.Containers[0].Args = []string{"-p", filepath.Join(parentDir, volumeDir),
-		"-s", strconv.FormatInt(sizeInBytes, 10),
-		"-m", volumeMode}
+	helperPod.Spec.Containers[0].Env = append(helperPod.Spec.Containers[0].Env, env...)
+	helperPod.Spec.Containers[0].Command = cmd
 
 	// If it already exists due to some previous errors, the pod will be cleaned up later automatically
 	// https://github.com/rancher/local-path-provisioner/issues/27
@@ -421,24 +499,52 @@
 		}
 	}()
 
-	completed := false
-	for i := 0; i < CmdTimeoutCounts; i++ {
+	done := make(chan struct{})
+	go func() {
+		<-time.After(p.helperPodTimeout)
+		close(done)
+	}()
+	for {
 		if pod, err := p.kubeClient.CoreV1().Pods(p.namespace).Get(helperPod.Name, metav1.GetOptions{}); err != nil {
 			return err
 		} else if pod.Status.Phase == v1.PodSucceeded {
-			completed = true
 			break
 		}
-		time.Sleep(1 * time.Second)
-	}
-	if !completed {
-		return fmt.Errorf("create process timeout after %v seconds", CmdTimeoutCounts)
+		select {
+		case <-done:
+			return fmt.Errorf("helper pod timed out after %v", p.helperPodTimeout)
+		default:
+			time.Sleep(time.Second)
+		}
 	}
 
-	logrus.Infof("Volume %v has been %vd on %v:%v", name, action, node, path)
+	logrus.Infof("Volume %v has been %vd on %v:%v", o.Name, action, o.Node, o.Path)
 	return nil
 }
 
+func filterEmptyEnvVars(env []v1.EnvVar) (r []v1.EnvVar) {
+	r = make([]v1.EnvVar, 0, len(env))
+	for _, e := range env {
+		if e.Value != "" {
+			r = append(r, e)
+		}
+	}
+	return r
+}
+
+func addVolumeMount(mounts *[]v1.VolumeMount, name, mountPath string) *v1.VolumeMount {
+	for i, m := range *mounts {
+		if m.Name == name {
+			if m.MountPath == "" {
+				(*mounts)[i].MountPath = mountPath
+			}
+			return &(*mounts)[i]
+		}
+	}
+	*mounts = append(*mounts, v1.VolumeMount{Name: name, MountPath: mountPath})
+	return &(*mounts)[len(*mounts)-1]
+}
+
 func isJSONFile(configFile string) bool {
 	return strings.HasSuffix(configFile, ".json")
 }
diff --git a/util.go b/util.go
index 5849dab1c..0a0b5415d 100644
--- 
a/util.go +++ b/util.go @@ -33,5 +33,8 @@ func loadHelperPodFile(helperPodYaml string) (*v1.Pod, error) { if err != nil { return nil, fmt.Errorf("invalid unmarshal the helper pod with helperPodJson: %v", string(helperPodJSON)) } + if len(p.Spec.Containers) == 0 { + return nil, fmt.Errorf("helper pod template does not specify any container") + } return &p, nil }