From 4f94084ba0d1302558a021117a4813088947e9ef Mon Sep 17 00:00:00 2001 From: Alexander Wels Date: Mon, 8 Apr 2024 09:13:53 -0500 Subject: [PATCH] Implement storage snapshot mapping (#106) Read the mapping from the driver-config ConfigMap. Use this mapping to determine if a snapshot can be created with the storage/volume snapshot class combination. If not possible return an error with a list of the possible snapshot classes. Signed-off-by: Alexander Wels --- README.md | 2 +- .../kubevirt-csi-driver.go | 7 +- deploy/tenant/base/deploy.yaml | 4 + docs/snapshot-driver-config.md | 97 + go.mod | 4 +- go.sum | 2 + hack/cluster-sync-split.sh | 1 - hack/cluster-sync.sh | 1 - hack/common.sh | 32 +- pkg/generated/bindata.go | 2468 ++++++++++++++--- pkg/kubevirt/client.go | 252 +- pkg/kubevirt/client_test.go | 224 +- pkg/service/controller.go | 14 +- pkg/service/controller_test.go | 9 +- pkg/service/identity.go | 6 +- pkg/util/util.go | 12 +- .../pkg/apis/core/v1beta1/types.go | 74 +- .../core/v1beta1/types_swagger_generated.go | 41 +- .../core/v1beta1/zz_generated.deepcopy.go | 112 + vendor/modules.txt | 4 +- 20 files changed, 2705 insertions(+), 661 deletions(-) create mode 100644 docs/snapshot-driver-config.md diff --git a/README.md b/README.md index 0278964d..9cb6db5a 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ data: infraClusterNamespace: kvcluster infraClusterLabels: csi-driver/cluster=tenant ``` - +For more information about the fields available in the `driver-config` ConfigMap see this [documentation](docs/snapshot-driver-config.md) ```yaml kind: Deployment apiVersion: apps/v1 diff --git a/cmd/kubevirt-csi-driver/kubevirt-csi-driver.go b/cmd/kubevirt-csi-driver/kubevirt-csi-driver.go index f7864d9a..dff46ee3 100644 --- a/cmd/kubevirt-csi-driver/kubevirt-csi-driver.go +++ b/cmd/kubevirt-csi-driver/kubevirt-csi-driver.go @@ -14,6 +14,7 @@ import ( "k8s.io/client-go/tools/clientcmd" klog "k8s.io/klog/v2" + snapcli "kubevirt.io/csi-driver/pkg/generated/external-snapshotter/client-go/clientset/versioned" "kubevirt.io/csi-driver/pkg/kubevirt" "kubevirt.io/csi-driver/pkg/service" "kubevirt.io/csi-driver/pkg/util" @@ -90,11 +91,15 @@ func handle() { if err != nil { klog.Fatalf("Failed to build tenant client set: %v", err) } + tenantSnapshotClientSet, err := snapcli.NewForConfig(tenantRestConfig) + if err != nil { + klog.Fatalf("Failed to build tenant snapshot client set: %v", err) + } infraClusterLabelsMap := parseLabels() storageClassEnforcement := configureStorageClassEnforcement(infraStorageClassEnforcement) - virtClient, err := kubevirt.NewClient(infraRestConfig, infraClusterLabelsMap, storageClassEnforcement, *volumePrefix) + virtClient, err := kubevirt.NewClient(infraRestConfig, infraClusterLabelsMap, tenantClientSet, tenantSnapshotClientSet, storageClassEnforcement, *volumePrefix) if err != nil { klog.Fatal(err) } diff --git a/deploy/tenant/base/deploy.yaml b/deploy/tenant/base/deploy.yaml index 7e5d0277..f5cc4981 100644 --- a/deploy/tenant/base/deploy.yaml +++ b/deploy/tenant/base/deploy.yaml @@ -129,6 +129,10 @@ rules: resources: ["securitycontextconstraints"] verbs: ["use"] resourceNames: ["privileged"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["list"] + --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 diff --git a/docs/snapshot-driver-config.md b/docs/snapshot-driver-config.md new file mode 100644 index 00000000..28fe9aae --- /dev/null +++ b/docs/snapshot-driver-config.md @@ -0,0 +1,97 @@ +# Configuring infra 
volume snapshot classes to map to tenant volume snapshot classes
It is possible to map multiple infra storage classes to multiple matching tenant storage classes. For instance, if the infra cluster has two completely separate storage classes, a and b, we can map them to tenant storage classes x and y. To define this mapping, create a ConfigMap in the infra cluster namespace that is used by the tenant cluster. This ConfigMap holds a few key/value pairs that define the expected behavior:

* allowAll: If true, all available storage classes in the infra cluster may be mapped to storage classes in the tenant cluster. If false, the allowList limits which storage classes are visible to the tenant.
* allowDefault: If true, no explicit mapping needs to be defined, and the driver will attempt to use the default storage class and default volume snapshot class of the infra cluster to satisfy requests from the tenant cluster.
* allowList: A comma-separated list of all the allowed infra storage classes. Only used if allowAll is false.
* storageSnapshotMapping: Groups lists of infra storage classes and infra volume snapshot classes together. Within a group, creating a snapshot using any of the listed volume snapshot classes should work with any of the listed storage classes, so a group should only contain volume snapshot classes that are compatible with its storage classes. This is needed because the service account of the CSI driver controller cannot always determine which volume snapshot classes go together with which storage classes.

## Example driver configs

### allowDefault
The simplest driver config uses the default storage class and default volume snapshot class, and no storage classes are restricted:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: driver-config
  namespace: example-namespace
data:
  infraClusterLabels: random-cluster-id #label used to distinguish between tenant clusters, if multiple clusters in same namespace
  infraClusterNamespace: example-namespace #Used to tell the tenant cluster which namespace it lives in
  infraStorageClassEnforcement: |
    allowAll: true
    allowDefault: true
```

### allowAll false, with default
This config also uses the default storage class and default volume snapshot class, but restricts the tenant to storage_class_a and storage_class_b. Either storage_class_a or storage_class_b should be the default:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: driver-config
  namespace: example-namespace
data:
  infraClusterLabels: random-cluster-id #label used to distinguish between tenant clusters, if multiple clusters in same namespace
  infraClusterNamespace: example-namespace #Used to tell the tenant cluster which namespace it lives in
  infraStorageClassEnforcement: |
    allowAll: false
    allowDefault: true
    allowList: [storage_class_a, storage_class_b]
```
Note: ensure that the infra cluster has a default volume snapshot class defined, otherwise creating infra cluster snapshots will fail due to a missing snapshot class value.

### Specify which storage class maps to which volume snapshot class, unrelated storage classes
The infra cluster has multiple storage classes and they map to volume snapshot classes.
The storage classes are not related and require different volume snapshot classes.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: driver-config
  namespace: example-namespace
data:
  infraClusterLabels: random-cluster-id #label used to distinguish between tenant clusters, if multiple clusters in same namespace
  infraClusterNamespace: example-namespace #Used to tell the tenant cluster which namespace it lives in
  infraStorageClassEnforcement: |
    allowAll: false
    allowDefault: true
    allowList: [storage_class_a, storage_class_b]
    storageSnapshotMapping:
    - StorageClasses:
      - storage_class_a
      VolumeSnapshotClasses:
      - volumesnapshot_class_a
    - StorageClasses:
      - storage_class_b
      VolumeSnapshotClasses:
      - volumesnapshot_class_b
```
If one tries to create a snapshot using volume snapshot class `volumesnapshot_class_b` on a PVC associated with storage class `storage_class_a`, the CSI driver will reject that request and return an error containing a list of valid volume snapshot classes, in this case `volumesnapshot_class_a`.

### Specify which storage class maps to which volume snapshot class, related storage classes
The infra cluster has multiple storage classes and they map to volume snapshot classes. The storage classes are related and can share the same volume snapshot classes.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: driver-config
  namespace: example-namespace
data:
  infraClusterLabels: random-cluster-id #label used to distinguish between tenant clusters, if multiple clusters in same namespace
  infraClusterNamespace: example-namespace #Used to tell the tenant cluster which namespace it lives in
  infraStorageClassEnforcement: |
    allowAll: false
    allowDefault: true
    allowList: [storage_class_a, storage_class_b]
    storageSnapshotMapping:
    - StorageClasses:
      - storage_class_a
      - storage_class_b
      VolumeSnapshotClasses:
      - volumesnapshot_class_a
      - volumesnapshot_class_b
```
In this case, both storage classes and both volume snapshot classes are in the same `StorageClasses` group, so creating a snapshot using `volumesnapshot_class_b` of a PVC from storage class `storage_class_a` will succeed, because that volume snapshot class is part of the group associated with that storage class.
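To illustrate the validation behavior described above, here is a minimal, self-contained Go sketch of how a driver could check a storage class/volume snapshot class pair against the `storageSnapshotMapping` groups. The type and function names here are hypothetical illustrations, not the driver's actual implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// StorageSnapshotMapping mirrors one entry of the storageSnapshotMapping list
// in the driver-config ConfigMap. The field names follow the YAML keys above.
type StorageSnapshotMapping struct {
	StorageClasses        []string
	VolumeSnapshotClasses []string
}

// allowedSnapshotClasses returns the volume snapshot classes grouped with the
// given infra storage class, or nil if the storage class appears in no group.
func allowedSnapshotClasses(mappings []StorageSnapshotMapping, storageClass string) []string {
	for _, m := range mappings {
		for _, sc := range m.StorageClasses {
			if sc == storageClass {
				return m.VolumeSnapshotClasses
			}
		}
	}
	return nil
}

// validateSnapshotClass returns nil when the requested snapshot class is
// grouped with the storage class, and otherwise an error listing the valid
// alternatives, mirroring the rejection behavior described above.
func validateSnapshotClass(mappings []StorageSnapshotMapping, storageClass, snapshotClass string) error {
	allowed := allowedSnapshotClasses(mappings, storageClass)
	for _, vsc := range allowed {
		if vsc == snapshotClass {
			return nil
		}
	}
	return fmt.Errorf("snapshot class %q is not compatible with storage class %q; valid snapshot classes: %s",
		snapshotClass, storageClass, strings.Join(allowed, ", "))
}

func main() {
	// Two unrelated groups, as in the first mapping example above.
	mappings := []StorageSnapshotMapping{
		{StorageClasses: []string{"storage_class_a"}, VolumeSnapshotClasses: []string{"volumesnapshot_class_a"}},
		{StorageClasses: []string{"storage_class_b"}, VolumeSnapshotClasses: []string{"volumesnapshot_class_b"}},
	}
	// Rejected: volumesnapshot_class_b is grouped only with storage_class_b.
	fmt.Println(validateSnapshotClass(mappings, "storage_class_a", "volumesnapshot_class_b"))
}
```
With the related-classes config, both snapshot classes would live in one group and the same request would pass validation.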
\ No newline at end of file diff --git a/go.mod b/go.mod index d8fe80a7..f4f826c3 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module kubevirt.io/csi-driver -go 1.20 +go 1.21 require ( github.com/container-storage-interface/spec v1.6.0 @@ -25,7 +25,7 @@ require ( k8s.io/klog/v2 v2.120.1 k8s.io/utils v0.0.0-20240102154912-e7106e64919e kubevirt.io/api v1.1.1 - kubevirt.io/containerized-data-importer-api v1.58.1 + kubevirt.io/containerized-data-importer-api v1.59.0 ) require ( diff --git a/go.sum b/go.sum index b48e280e..23ffea30 100644 --- a/go.sum +++ b/go.sum @@ -1853,6 +1853,8 @@ kubevirt.io/api v1.1.1 h1:vt5bOpACArNFIudx1bcE1VeejQdh5wCd7Oz/uFBIkH8= kubevirt.io/api v1.1.1/go.mod h1:CJ4vZsaWhVN3jNbyc9y3lIZhw8nUHbWjap0xHABQiqc= kubevirt.io/containerized-data-importer-api v1.58.1 h1:Zbf0pCvxb4fBvtMR6uI2OIJZ4UfwFxripzOLMO4HPbI= kubevirt.io/containerized-data-importer-api v1.58.1/go.mod h1:Y/8ETgHS1GjO89bl682DPtQOYEU/1ctPFBz6Sjxm4DM= +kubevirt.io/containerized-data-importer-api v1.59.0 h1:GdDt9BlR0qHejpMaPfASbsG8JWDmBf1s7xZBj5W9qn0= +kubevirt.io/containerized-data-importer-api v1.59.0/go.mod h1:4yOGtCE7HvgKp7wftZZ3TBvDJ0x9d6N6KaRjRYcUFpE= kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 h1:QMrd0nKP0BGbnxTqakhDZAUhGKxPiPiN5gSDqKUmGGc= kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= diff --git a/hack/cluster-sync-split.sh b/hack/cluster-sync-split.sh index 2bc9e683..d9599304 100755 --- a/hack/cluster-sync-split.sh +++ b/hack/cluster-sync-split.sh @@ -67,7 +67,6 @@ END mkdir -p ./deploy/controller-infra/dev-overlay mkdir -p ./deploy/tenant/dev-overlay -cluster::generate_controller_rbac $TENANT_CLUSTER_NAMESPACE cluster::generate_tenant_dev_kustomization cluster::generate_controller_dev_kustomization "controller-infra" $TENANT_CLUSTER_NAMESPACE tenant::deploy_csidriver_namespace $CSI_DRIVER_NAMESPACE diff --git a/hack/cluster-sync.sh b/hack/cluster-sync.sh index 0033bdf2..4aabcd3a 100755 --- a/hack/cluster-sync.sh +++ b/hack/cluster-sync.sh @@ -93,7 +93,6 @@ END mkdir -p ./deploy/controller-tenant/dev-overlay mkdir -p ./deploy/tenant/dev-overlay -cluster::generate_controller_rbac $TENANT_CLUSTER_NAMESPACE cluster::generate_tenant_dev_kustomization cluster::generate_controller_dev_kustomization "controller-tenant" $CSI_DRIVER_NAMESPACE tenant::deploy_csidriver_namespace $CSI_DRIVER_NAMESPACE diff --git a/hack/common.sh b/hack/common.sh index fcdfb312..ad841ff9 100644 --- a/hack/common.sh +++ b/hack/common.sh @@ -131,34 +131,6 @@ function tenant::deploy_snapshotresources() { ./kubevirtci kubectl-tenant apply -f ./deploy/tenant/base/snapshot.storage.k8s.io_volumesnapshotclasses.yaml ./kubevirtci kubectl-tenant apply -f ./deploy/tenant/base/snapshot.storage.k8s.io_volumesnapshotcontents.yaml ./kubevirtci kubectl-tenant apply -f ./deploy/tenant/base/snapshot.storage.k8s.io_volumesnapshots.yaml + # Make sure the infra rbd snapshot class is the default snapshot class + ./kubevirtci kubectl patch volumesnapshotclass csi-rbdplugin-snapclass --type merge -p '{"metadata": {"annotations":{"snapshot.storage.kubernetes.io/is-default-class":"true"}}}' } - -function cluster::generate_controller_rbac() { - cat <<- END | ./kubevirtci kubectl apply -f - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: kubevirt-csi-snapshot -rules: -- apiGroups: ["storage.k8s.io"] - 
resources: ["storageclasses"] - verbs: ["get"] -- apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotclasses"] - verbs: ["get", "list"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kubevirt-csi-snapshot -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kubevirt-csi-snapshot -subjects: -- kind: ServiceAccount - name: kubevirt-csi - namespace: $1 -END -} \ No newline at end of file diff --git a/pkg/generated/bindata.go b/pkg/generated/bindata.go index fd985460..3be644fc 100644 --- a/pkg/generated/bindata.go +++ b/pkg/generated/bindata.go @@ -1,11 +1,14 @@ // Code generated for package generated by go-bindata DO NOT EDIT. (@generated) // sources: -// deploy/000-csi-driver.yaml -// deploy/000-namespace.yaml -// deploy/020-autorization.yaml -// deploy/030-node.yaml -// deploy/040-controller.yaml -// deploy/configmap.yaml +// deploy/controller-infra/base/deploy.yaml +// deploy/controller-infra/base/kustomization.yaml +// deploy/controller-infra/dev-overlay/controller.yaml +// deploy/controller-infra/dev-overlay/infra-namespace-configmap.yaml +// deploy/controller-infra/dev-overlay/kustomization.yaml +// deploy/controller-tenant/base/deploy.yaml +// deploy/controller-tenant/base/kustomization.yaml +// deploy/controller-tenant/dev-overlay/controller.yaml +// deploy/controller-tenant/dev-overlay/kustomization.yaml // deploy/example/infracluster-kubeconfig.yaml // deploy/example/kubevirt-config.yaml // deploy/example/kubevirt.yaml @@ -13,7 +16,17 @@ // deploy/example/storageclass.yaml // deploy/example/test-pod.yaml // deploy/infra-cluster-service-account.yaml -// deploy/secret.yaml +// deploy/tenant/base/deploy.yaml +// deploy/tenant/base/kustomization.yaml +// deploy/tenant/base/rbac-snapshot-controller.yaml +// deploy/tenant/base/setup-snapshot-controller.yaml +// deploy/tenant/base/snapshot.storage.k8s.io_volumesnapshotclasses.yaml +// deploy/tenant/base/snapshot.storage.k8s.io_volumesnapshotcontents.yaml +// deploy/tenant/base/snapshot.storage.k8s.io_volumesnapshots.yaml +// deploy/tenant/dev-overlay/infra-namespace-configmap.yaml +// deploy/tenant/dev-overlay/kustomization.yaml +// deploy/tenant/dev-overlay/node.yaml +// deploy/tenant/dev-overlay/storageclass.yaml package generated import ( @@ -67,62 +80,796 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _deploy000CsiDriverYaml = []byte(`#TODO v1beta is deprecated in 1.19+ and promoted to v1 -apiVersion: storage.k8s.io/v1 -kind: CSIDriver +var _deployControllerInfraBaseDeployYaml = []byte(`kind: Deployment +apiVersion: apps/v1 +metadata: + name: kubevirt-csi-controller + namespace: kubevirt-csi-driver + labels: + app: kubevirt-csi-driver +spec: + replicas: 1 + selector: + matchLabels: + app: kubevirt-csi-driver + template: + metadata: + labels: + app: kubevirt-csi-driver + spec: + serviceAccount: kubevirt-csi + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + effect: "NoSchedule" + containers: + - name: csi-driver + imagePullPolicy: Always + image: quay.io/kubevirt/csi-driver:latest + args: + - "--endpoint=$(CSI_ENDPOINT)" + - "--infra-cluster-namespace=$(INFRACLUSTER_NAMESPACE)" + - "--infra-cluster-labels=$(INFRACLUSTER_LABELS)" + - "--tenant-cluster-kubeconfig=/var/run/secrets/tenantcluster/value" + - "--run-node-service=false" + - 
"--run-controller-service=true" + - "--v=5" + ports: + - name: healthz + containerPort: 10301 + protocol: TCP + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: INFRACLUSTER_NAMESPACE + valueFrom: + configMapKeyRef: + name: driver-config + key: infraClusterNamespace + - name: INFRACLUSTER_LABELS + valueFrom: + configMapKeyRef: + name: driver-config + key: infraClusterLabels + - name: INFRA_STORAGE_CLASS_ENFORCEMENT + valueFrom: + configMapKeyRef: + name: driver-config + key: infraStorageClassEnforcement + optional: true + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: tenantcluster + mountPath: "/var/run/secrets/tenantcluster" + resources: + requests: + memory: 50Mi + cpu: 10m + - name: csi-provisioner + image: quay.io/openshift/origin-csi-external-provisioner:latest + args: + - "--csi-address=$(ADDRESS)" + - "--default-fstype=ext4" + - "--kubeconfig=/var/run/secrets/tenantcluster/value" + - "--v=5" + - "--timeout=3m" + - "--retry-interval-max=1m" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: tenantcluster + mountPath: "/var/run/secrets/tenantcluster" + - name: csi-attacher + image: quay.io/openshift/origin-csi-external-attacher:latest + args: + - "--csi-address=$(ADDRESS)" + - "--kubeconfig=/var/run/secrets/tenantcluster/value" + - "--v=5" + - "--timeout=3m" + - "--retry-interval-max=1m" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: tenantcluster + mountPath: "/var/run/secrets/tenantcluster" + resources: + requests: + memory: 50Mi + cpu: 10m + - name: csi-liveness-probe + image: quay.io/openshift/origin-csi-livenessprobe:latest + args: + - "--csi-address=/csi/csi.sock" + - "--probe-timeout=3s" + - "--health-port=10301" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: tenantcluster + mountPath: "/var/run/secrets/tenantcluster" + resources: + requests: + memory: 50Mi + cpu: 10m + - name: csi-snapshotter + args: + - "--v=5" + - "--csi-address=/csi/csi.sock" + - "--kubeconfig=/var/run/secrets/tenantcluster/value" + image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.1 + imagePullPolicy: IfNotPresent + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /csi + name: socket-dir + - name: tenantcluster + mountPath: "/var/run/secrets/tenantcluster" + resources: + requests: + memory: 20Mi + cpu: 10m + volumes: + - name: socket-dir + emptyDir: {} + - name: tenantcluster + secret: + secretName: kvcluster-kubeconfig +`) + +func deployControllerInfraBaseDeployYamlBytes() ([]byte, error) { + return _deployControllerInfraBaseDeployYaml, nil +} + +func deployControllerInfraBaseDeployYaml() (*asset, error) { + bytes, err := deployControllerInfraBaseDeployYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "deploy/controller-infra/base/deploy.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployControllerInfraBaseKustomizationYaml = []byte(`commonLabels: + app: kubevirt-csi-driver +resources: +- deploy.yaml +`) + +func deployControllerInfraBaseKustomizationYamlBytes() ([]byte, error) { + return 
_deployControllerInfraBaseKustomizationYaml, nil +} + +func deployControllerInfraBaseKustomizationYaml() (*asset, error) { + bytes, err := deployControllerInfraBaseKustomizationYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "deploy/controller-infra/base/kustomization.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployControllerInfraDevOverlayControllerYaml = []byte(`kind: Deployment +apiVersion: apps/v1 +metadata: + name: kubevirt-csi-controller + namespace: kubevirt-csi-driver + labels: + app: kubevirt-csi-driver +spec: + template: + spec: + containers: + - name: csi-driver + image: registry:5000/kubevirt-csi-driver:latest +`) + +func deployControllerInfraDevOverlayControllerYamlBytes() ([]byte, error) { + return _deployControllerInfraDevOverlayControllerYaml, nil +} + +func deployControllerInfraDevOverlayControllerYaml() (*asset, error) { + bytes, err := deployControllerInfraDevOverlayControllerYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "deploy/controller-infra/dev-overlay/controller.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployControllerInfraDevOverlayInfraNamespaceConfigmapYaml = []byte(`apiVersion: v1 +kind: ConfigMap +metadata: + name: driver-config + namespace: kubevirt-csi-driver +data: + infraClusterNamespace: kvcluster + infraClusterLabels: csi-driver/cluster=tenant +`) + +func deployControllerInfraDevOverlayInfraNamespaceConfigmapYamlBytes() ([]byte, error) { + return _deployControllerInfraDevOverlayInfraNamespaceConfigmapYaml, nil +} + +func deployControllerInfraDevOverlayInfraNamespaceConfigmapYaml() (*asset, error) { + bytes, err := deployControllerInfraDevOverlayInfraNamespaceConfigmapYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "deploy/controller-infra/dev-overlay/infra-namespace-configmap.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployControllerInfraDevOverlayKustomizationYaml = []byte(`resources: +- ../base +- infra-namespace-configmap.yaml +namespace: kvcluster +patches: +- path: controller.yaml +`) + +func deployControllerInfraDevOverlayKustomizationYamlBytes() ([]byte, error) { + return _deployControllerInfraDevOverlayKustomizationYaml, nil +} + +func deployControllerInfraDevOverlayKustomizationYaml() (*asset, error) { + bytes, err := deployControllerInfraDevOverlayKustomizationYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "deploy/controller-infra/dev-overlay/kustomization.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployControllerTenantBaseDeployYaml = []byte(`--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: kubevirt-csi-controller + namespace: kubevirt-csi-driver +spec: + replicas: 1 + selector: + matchLabels: + app: kubevirt-csi-driver + template: + metadata: + labels: + app: kubevirt-csi-driver + spec: + serviceAccount: kubevirt-csi-controller-sa + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + effect: "NoSchedule" + - key: node-role.kubernetes.io/control-plane + 
operator: Exists + effect: "NoSchedule" + containers: + - name: csi-driver + imagePullPolicy: Always + image: quay.io/kubevirt/csi-driver:latest + args: + - "--endpoint=$(CSI_ENDPOINT)" + - "--infra-cluster-namespace=$(INFRACLUSTER_NAMESPACE)" + - "--infra-cluster-kubeconfig=/var/run/secrets/infracluster/kubeconfig" + - "--infra-cluster-labels=$(INFRACLUSTER_LABELS)" + - --v=5 + ports: + - name: healthz + containerPort: 10301 + protocol: TCP + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: INFRACLUSTER_NAMESPACE + valueFrom: + configMapKeyRef: + name: driver-config + key: infraClusterNamespace + - name: INFRACLUSTER_LABELS + valueFrom: + configMapKeyRef: + name: driver-config + key: infraClusterLabels + - name: INFRA_STORAGE_CLASS_ENFORCEMENT + valueFrom: + configMapKeyRef: + name: driver-config + key: infraStorageClassEnforcement + optional: true + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: infracluster + mountPath: "/var/run/secrets/infracluster" + resources: + requests: + memory: 50Mi + cpu: 10m + limits: + memory: 500Mi + cpu: 250m + - name: csi-provisioner + image: quay.io/openshift/origin-csi-external-provisioner:latest + args: + - --csi-address=$(ADDRESS) + - --default-fstype=ext4 + - --v=5 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + resources: + requests: + memory: 50Mi + cpu: 10m + limits: + memory: 500Mi + cpu: 250m + - name: csi-attacher + image: quay.io/openshift/origin-csi-external-attacher:latest + args: + - --csi-address=$(ADDRESS) + - --v=5 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + resources: + requests: + memory: 50Mi + cpu: 10m + limits: + memory: 500Mi + cpu: 250m + - name: csi-liveness-probe + image: quay.io/openshift/origin-csi-livenessprobe:latest + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=10301 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + requests: + memory: 50Mi + cpu: 10m + limits: + memory: 500Mi + cpu: 250m + - name: csi-snapshotter + args: + - --v=3 + - --csi-address=/csi/csi.sock + image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.1 + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + requests: + memory: 20Mi + cpu: 10m + limits: + memory: 500Mi + cpu: 250m + volumes: + - name: socket-dir + emptyDir: {} + - name: infracluster + secret: + secretName: infra-cluster-credentials +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshotter-role +subjects: + - kind: ServiceAccount + name: kubevirt-csi-controller-sa + namespace: kubevirt-csi-driver +roleRef: + kind: ClusterRole + name: external-snapshotter-runner + apiGroup: rbac.authorization.k8s.io +`) + +func deployControllerTenantBaseDeployYamlBytes() ([]byte, error) { + return _deployControllerTenantBaseDeployYaml, nil +} + +func deployControllerTenantBaseDeployYaml() (*asset, error) { + bytes, err := deployControllerTenantBaseDeployYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: 
"deploy/controller-tenant/base/deploy.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployControllerTenantBaseKustomizationYaml = []byte(`commonLabels: + app: kubevirt-csi-driver +resources: +- deploy.yaml +`) + +func deployControllerTenantBaseKustomizationYamlBytes() ([]byte, error) { + return _deployControllerTenantBaseKustomizationYaml, nil +} + +func deployControllerTenantBaseKustomizationYaml() (*asset, error) { + bytes, err := deployControllerTenantBaseKustomizationYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "deploy/controller-tenant/base/kustomization.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployControllerTenantDevOverlayControllerYaml = []byte(`kind: Deployment +apiVersion: apps/v1 metadata: - name: csi.kubevirt.io + name: kubevirt-csi-controller + namespace: kubevirt-csi-driver + labels: + app: kubevirt-csi-driver spec: - attachRequired: true - podInfoOnMount: true + template: + spec: + containers: + - name: csi-driver + image: 192.168.66.2:5000/kubevirt-csi-driver:latest +`) + +func deployControllerTenantDevOverlayControllerYamlBytes() ([]byte, error) { + return _deployControllerTenantDevOverlayControllerYaml, nil +} + +func deployControllerTenantDevOverlayControllerYaml() (*asset, error) { + bytes, err := deployControllerTenantDevOverlayControllerYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "deploy/controller-tenant/dev-overlay/controller.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployControllerTenantDevOverlayKustomizationYaml = []byte(`resources: +- ../base +namespace: kubevirt-csi-driver +patches: +- path: controller.yaml +`) + +func deployControllerTenantDevOverlayKustomizationYamlBytes() ([]byte, error) { + return _deployControllerTenantDevOverlayKustomizationYaml, nil +} + +func deployControllerTenantDevOverlayKustomizationYaml() (*asset, error) { + bytes, err := deployControllerTenantDevOverlayKustomizationYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "deploy/controller-tenant/dev-overlay/kustomization.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployExampleInfraclusterKubeconfigYaml = []byte(`apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: + server: + name: infra-cluster +contexts: +- context: + cluster: infra-cluster + namespace: + user: kubevirt-csi + name: only-context +current-context: only-context +kind: Config +preferences: {} +users: +- name: kubevirt-csi + user: + token: +`) + +func deployExampleInfraclusterKubeconfigYamlBytes() ([]byte, error) { + return _deployExampleInfraclusterKubeconfigYaml, nil +} + +func deployExampleInfraclusterKubeconfigYaml() (*asset, error) { + bytes, err := deployExampleInfraclusterKubeconfigYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "deploy/example/infracluster-kubeconfig.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployExampleKubevirtConfigYaml = []byte(`apiVersion: v1 +kind: ConfigMap +metadata: + name: kubevirt-config + namespace: kubevirt +data: + default-network-interface: masquerade + feature-gates: 
DataVolumes,SRIOV,LiveMigration,CPUManager,CPUNodeDiscovery,Sidecar,Snapshot,HotplugVolumes + selinuxLauncherType: virt_launcher.process + smbios: |- + Family: KubeVirt + Manufacturer: KubeVirt + Product: None +`) + +func deployExampleKubevirtConfigYamlBytes() ([]byte, error) { + return _deployExampleKubevirtConfigYaml, nil +} + +func deployExampleKubevirtConfigYaml() (*asset, error) { + bytes, err := deployExampleKubevirtConfigYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "deploy/example/kubevirt-config.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployExampleKubevirtYaml = []byte(`apiVersion: kubevirt.io/v1alpha3 +kind: KubeVirt +metadata: + name: kubevirt + namespace: kubevirt +spec: + certificateRotateStrategy: {} + configuration: + developerConfiguration: + featureGates: + - DataVolumes + - SRIOV + - LiveMigration + - CPUManager + - CPUNodeDiscovery + - Sidecar + - Snapshot + - HotplugVolumes + network: + defaultNetworkInterface: masquerade + smbios: + family: KubeVirt + manufacturer: KubeVirt + product: None + selinuxLauncherType: virt_launcher.process + customizeComponents: {} + uninstallStrategy: BlockUninstallIfWorkloadsExist +`) + +func deployExampleKubevirtYamlBytes() ([]byte, error) { + return _deployExampleKubevirtYaml, nil +} + +func deployExampleKubevirtYaml() (*asset, error) { + bytes, err := deployExampleKubevirtYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "deploy/example/kubevirt.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployExampleStorageClaimYaml = []byte(`kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: 1g-kubevirt-disk +spec: + storageClassName: kubevirt + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +`) + +func deployExampleStorageClaimYamlBytes() ([]byte, error) { + return _deployExampleStorageClaimYaml, nil +} + +func deployExampleStorageClaimYaml() (*asset, error) { + bytes, err := deployExampleStorageClaimYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "deploy/example/storage-claim.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployExampleStorageclassYaml = []byte(`apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: kubevirt + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: csi.kubevirt.io +parameters: + infraStorageClassName: standard + bus: scsi +`) + +func deployExampleStorageclassYamlBytes() ([]byte, error) { + return _deployExampleStorageclassYaml, nil +} + +func deployExampleStorageclassYaml() (*asset, error) { + bytes, err := deployExampleStorageclassYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "deploy/example/storageclass.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployExampleTestPodYaml = []byte(`apiVersion: v1 +kind: Pod +metadata: + name: testpodwithcsi +spec: + containers: + - image: busybox + name: testpodwithcsi + command: ["sh", "-c", "while true; do ls -la /opt; echo this file system was made availble using kubevirt-csi-driver; mktmp /opt/test-XXXXXX; sleep 1m; done"] + imagePullPolicy: Always + volumeMounts: + - name: pv0002 + mountPath: "/opt" + volumes: 
+ - name: pv0002 + persistentVolumeClaim: + claimName: 1g-kubevirt-disk `) -func deploy000CsiDriverYamlBytes() ([]byte, error) { - return _deploy000CsiDriverYaml, nil +func deployExampleTestPodYamlBytes() ([]byte, error) { + return _deployExampleTestPodYaml, nil } -func deploy000CsiDriverYaml() (*asset, error) { - bytes, err := deploy000CsiDriverYamlBytes() +func deployExampleTestPodYaml() (*asset, error) { + bytes, err := deployExampleTestPodYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "deploy/000-csi-driver.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "deploy/example/test-pod.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _deploy000NamespaceYaml = []byte(`apiVersion: v1 -kind: Namespace -metadata: - name: kubevirt-csi-driver - annotations: - openshift.io/node-selector: "" - labels: - name: kubevirt-csi-driver +var _deployInfraClusterServiceAccountYaml = []byte(`apiVersion: v1 +kind: ServiceAccount +metadata: + name: kubevirt-csi +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: kubevirt-csi +rules: +- apiGroups: ["cdi.kubevirt.io"] + resources: ["datavolumes"] + verbs: ["get", "create", "delete"] +- apiGroups: ["kubevirt.io"] + resources: ["virtualmachineinstances"] + verbs: ["list", "get"] +- apiGroups: ["subresources.kubevirt.io"] + resources: ["virtualmachineinstances/addvolume", "virtualmachineinstances/removevolume"] + verbs: ["update"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "create", "delete"] +- apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kubevirt-csi +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubevirt-csi +subjects: +- kind: ServiceAccount + name: kubevirt-csi + `) -func deploy000NamespaceYamlBytes() ([]byte, error) { - return _deploy000NamespaceYaml, nil +func deployInfraClusterServiceAccountYamlBytes() ([]byte, error) { + return _deployInfraClusterServiceAccountYaml, nil } -func deploy000NamespaceYaml() (*asset, error) { - bytes, err := deploy000NamespaceYamlBytes() +func deployInfraClusterServiceAccountYaml() (*asset, error) { + bytes, err := deployInfraClusterServiceAccountYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "deploy/000-namespace.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "deploy/infra-cluster-service-account.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _deploy020AutorizationYaml = []byte(`apiVersion: v1 -kind: ServiceAccount +var _deployTenantBaseDeployYaml = []byte(`apiVersion: storage.k8s.io/v1 +kind: CSIDriver metadata: - name: kubevirt-csi-node-sa - namespace: kubevirt-csi-driver + name: csi.kubevirt.io +spec: + attachRequired: true + podInfoOnMount: true + fsGroupPolicy: ReadWriteOnceWithFSType --- apiVersion: v1 kind: ServiceAccount @@ -135,10 +882,6 @@ kind: ClusterRole metadata: name: kubevirt-csi-controller-cr rules: - # Allow listing and creating CRDs - - apiGroups: ['apiextensions.k8s.io'] - resources: ['customresourcedefinitions'] - verbs: ['list', 'create'] - apiGroups: [''] resources: ['persistentvolumes'] verbs: ['create', 'delete', 'get', 'list', 'watch', 'update', 'patch'] @@ -189,6 +932,31 @@ rules: verbs: ["use"] 
resourceNames: ["privileged"] --- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubevirt-csi-controller-binding +subjects: + - kind: ServiceAccount + name: kubevirt-csi-controller-sa + namespace: kubevirt-csi-driver +roleRef: + kind: ClusterRole + name: kubevirt-csi-controller-cr + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kubevirt-csi-node-sa + namespace: kubevirt-csi-driver +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kubevirt-csi-snapshot-sa + namespace: kubevirt-csi-driver +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -226,50 +994,51 @@ rules: verbs: ["use"] resourceNames: ["privileged"] --- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: external-snapshotter-runner +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] +--- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: kubevirt-csi-controller-binding + name: kubevirt-csi-node-binding subjects: - kind: ServiceAccount - name: kubevirt-csi-controller-sa + name: kubevirt-csi-node-sa namespace: kubevirt-csi-driver roleRef: kind: ClusterRole - name: kubevirt-csi-controller-cr + name: kubevirt-csi-node-cr apiGroup: rbac.authorization.k8s.io --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: kubevirt-csi-node-binding + name: csi-snapshotter-role subjects: - kind: ServiceAccount - name: kubevirt-csi-node-sa + name: kubevirt-csi-snapshot-sa namespace: kubevirt-csi-driver roleRef: kind: ClusterRole - name: kubevirt-csi-node-cr + # change the name also here if the ClusterRole gets renamed + name: external-snapshotter-runner apiGroup: rbac.authorization.k8s.io --- -`) - -func deploy020AutorizationYamlBytes() ([]byte, error) { - return _deploy020AutorizationYaml, nil -} - -func deploy020AutorizationYaml() (*asset, error) { - bytes, err := deploy020AutorizationYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "deploy/020-autorization.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _deploy030NodeYaml = []byte(`#TODO: Force DaemonSet to not run on master - see kind: DaemonSet apiVersion: apps/v1 metadata: @@ -286,7 +1055,6 @@ spec: labels: app: kubevirt-csi-driver spec: - hostNetwork: true serviceAccount: kubevirt-csi-node-sa priorityClassName: system-node-critical tolerations: @@ -300,28 +1068,16 @@ spec: image: quay.io/kubevirt/csi-driver:latest args: - "--endpoint=unix:/csi/csi.sock" - - "--namespace=kubevirt-csi-driver" - "--node-name=$(KUBE_NODE_NAME)" - - "--infra-cluster-namespace=$(INFRACLUSTER_NAMESPACE)" - - "--infra-cluster-kubeconfig=/var/run/secrets/infracluster/kubeconfig" + - "--run-node-service=true" + - "--run-controller-service=false" + - "--v=5" env: - name: KUBE_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName - - name: INFRACLUSTER_NAMESPACE - valueFrom: - configMapKeyRef: - name: driver-config - key: infraClusterNamespace - 
- name: INFRACLUSTER_LABELS - valueFrom: - configMapKeyRef: - name: driver-config - key: infraClusterLabels volumeMounts: - - name: infracluster - mountPath: "/var/run/secrets/infracluster" - name: kubelet-dir mountPath: /var/lib/kubelet mountPropagation: "Bidirectional" @@ -333,7 +1089,6 @@ spec: mountPath: /run/udev ports: - name: healthz - # due to hostNetwork, this port is open on a node! containerPort: 10300 protocol: TCP livenessProbe: @@ -349,13 +1104,11 @@ spec: memory: 50Mi cpu: 10m - name: csi-node-driver-registrar - securityContext: - privileged: true image: quay.io/openshift/origin-csi-node-driver-registrar:latest args: - - --csi-address=$(ADDRESS) - - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) - - --v=5 + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--v=5" lifecycle: preStop: exec: @@ -377,9 +1130,9 @@ spec: - name: csi-liveness-probe image: quay.io/openshift/origin-csi-livenessprobe:latest args: - - --csi-address=/csi/csi.sock - - --probe-timeout=3s - - --health-port=10300 + - "--csi-address=/csi/csi.sock" + - "--probe-timeout=3s" + - "--health-port=10300" volumeMounts: - name: plugin-dir mountPath: /csi @@ -388,9 +1141,6 @@ spec: memory: 20Mi cpu: 5m volumes: - - name: infracluster - secret: - secretName: infra-cluster-credentials - name: kubelet-dir hostPath: path: /var/lib/kubelet @@ -410,445 +1160,1261 @@ spec: - name: udev hostPath: path: /run/udev +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: driver-config + namespace: kubevirt-csi-driver +data: + infraClusterNamespace: + infraClusterLabels: csi-driver/cluster=tenant +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: kubevirt + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: csi.kubevirt.io +parameters: + infraStorageClassName: standard + bus: scsi +--- +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotClass +metadata: + name: kubevirt-csi-snapclass +driver: csi.kubevirt.io +deletionPolicy: Delete `) -func deploy030NodeYamlBytes() ([]byte, error) { - return _deploy030NodeYaml, nil +func deployTenantBaseDeployYamlBytes() ([]byte, error) { + return _deployTenantBaseDeployYaml, nil } -func deploy030NodeYaml() (*asset, error) { - bytes, err := deploy030NodeYamlBytes() +func deployTenantBaseDeployYaml() (*asset, error) { + bytes, err := deployTenantBaseDeployYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "deploy/030-node.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "deploy/tenant/base/deploy.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _deploy040ControllerYaml = []byte(`kind: Deployment -apiVersion: apps/v1 -metadata: - name: kubevirt-csi-controller - namespace: kubevirt-csi-driver -spec: - replicas: 1 - selector: - matchLabels: - app: kubevirt-csi-driver - template: - metadata: - labels: - app: kubevirt-csi-driver - spec: - hostNetwork: true - serviceAccount: kubevirt-csi-controller-sa - priorityClassName: system-cluster-critical - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/master - operator: Exists - effect: "NoSchedule" - containers: - - name: csi-driver - imagePullPolicy: Always - image: quay.io/kubevirt/csi-driver:latest - args: - - "--endpoint=$(CSI_ENDPOINT)" - - "--namespace=kubevirt-csi-driver" - - 
"--infra-cluster-namespace=$(INFRACLUSTER_NAMESPACE)" - - "--infra-cluster-kubeconfig=/var/run/secrets/infracluster/kubeconfig" - - "--infra-cluster-labels=$(INFRACLUSTER_LABELS)" - - --v=5 - ports: - - name: healthz - # Due to hostNetwork, this port is open on a node! - containerPort: 10301 - protocol: TCP - env: - - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: INFRACLUSTER_NAMESPACE - valueFrom: - configMapKeyRef: - name: driver-config - key: infraClusterNamespace - - name: INFRACLUSTER_LABELS - valueFrom: - configMapKeyRef: - name: driver-config - key: infraClusterLabels - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - name: infracluster - mountPath: "/var/run/secrets/infracluster" - resources: - requests: - memory: 50Mi - cpu: 10m - - name: csi-provisioner - image: quay.io/openshift/origin-csi-external-provisioner:latest - args: - - --csi-address=$(ADDRESS) - - --default-fstype=ext4 - - --v=5 - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - name: csi-attacher - image: quay.io/openshift/origin-csi-external-attacher:latest - args: - - --csi-address=$(ADDRESS) - - --v=5 - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - resources: - requests: - memory: 50Mi - cpu: 10m - - name: csi-liveness-probe - image: quay.io/openshift/origin-csi-livenessprobe:latest - args: - - --csi-address=/csi/csi.sock - - --probe-timeout=3s - - --health-port=10301 - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: - requests: - memory: 50Mi - cpu: 10m - volumes: - - name: socket-dir - emptyDir: {} - - name: infracluster - secret: - secretName: infra-cluster-credentials +var _deployTenantBaseKustomizationYaml = []byte(`commonLabels: + app: kubevirt-csi-driver +resources: +- rbac-snapshot-controller.yaml +- setup-snapshot-controller.yaml +- snapshot.storage.k8s.io_volumesnapshotclasses.yaml +- snapshot.storage.k8s.io_volumesnapshotcontents.yaml +- snapshot.storage.k8s.io_volumesnapshots.yaml +- deploy.yaml `) -func deploy040ControllerYamlBytes() ([]byte, error) { - return _deploy040ControllerYaml, nil -} +func deployTenantBaseKustomizationYamlBytes() ([]byte, error) { + return _deployTenantBaseKustomizationYaml, nil +} + +func deployTenantBaseKustomizationYaml() (*asset, error) { + bytes, err := deployTenantBaseKustomizationYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "deploy/tenant/base/kustomization.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _deployTenantBaseRbacSnapshotControllerYaml = []byte(`# RBAC file for the snapshot controller. +# +# The snapshot controller implements the control loop for CSI snapshot functionality. +# It should be installed as part of the base Kubernetes distribution in an appropriate +# namespace for components implementing base system functionality. For installing with +# Vanilla Kubernetes, kube-system makes sense for the namespace. 
+ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: snapshot-controller + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-runner +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update", "patch"] + + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotcontents/status"] + verbs: ["patch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshots"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshots/status"] + verbs: ["update", "patch"] -func deploy040ControllerYaml() (*asset, error) { - bytes, err := deploy040ControllerYamlBytes() - if err != nil { - return nil, err - } + # Enable this RBAC rule only when using distributed snapshotting, i.e. when the enable-distributed-snapshotting flag is set to true + # - apiGroups: [""] + # resources: ["nodes"] + # verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-role +subjects: + - kind: ServiceAccount + name: snapshot-controller + namespace: kube-system +roleRef: + kind: ClusterRole + name: snapshot-controller-runner + apiGroup: rbac.authorization.k8s.io - info := bindataFileInfo{name: "deploy/040-controller.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-leaderelection + namespace: kube-system +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] -var _deployConfigmapYaml = []byte(`apiVersion: v1 -kind: ConfigMap +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: driver-config - namespace: kubevirt-csi-driver -data: - infraClusterNamespace: - infraClusterLabels: key=value,key=value,... 
+ name: snapshot-controller-leaderelection + namespace: kube-system +subjects: + - kind: ServiceAccount + name: snapshot-controller +roleRef: + kind: Role + name: snapshot-controller-leaderelection + apiGroup: rbac.authorization.k8s.io `) -func deployConfigmapYamlBytes() ([]byte, error) { - return _deployConfigmapYaml, nil +func deployTenantBaseRbacSnapshotControllerYamlBytes() ([]byte, error) { + return _deployTenantBaseRbacSnapshotControllerYaml, nil } -func deployConfigmapYaml() (*asset, error) { - bytes, err := deployConfigmapYamlBytes() +func deployTenantBaseRbacSnapshotControllerYaml() (*asset, error) { + bytes, err := deployTenantBaseRbacSnapshotControllerYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "deploy/configmap.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "deploy/tenant/base/rbac-snapshot-controller.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _deployExampleInfraclusterKubeconfigYaml = []byte(`apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: - server: - name: infra-cluster -contexts: -- context: - cluster: infra-cluster - namespace: - user: kubevirt-csi - name: only-context -current-context: only-context -kind: Config -preferences: {} -users: -- name: kubevirt-csi - user: - token: +var _deployTenantBaseSetupSnapshotControllerYaml = []byte(`# This YAML file shows how to deploy the snapshot controller + +# The snapshot controller implements the control loop for CSI snapshot functionality. +# It should be installed as part of the base Kubernetes distribution in an appropriate +# namespace for components implementing base system functionality. For installing with +# Vanilla Kubernetes, kube-system makes sense for the namespace. + +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: snapshot-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: snapshot-controller + # The snapshot controller won't be marked as ready if the v1 CRDs are unavailable. + # The flag --retry-crd-interval-max is used to determine how long the controller + # will wait for the CRDs to become available before exiting. The default is 30 seconds + # so minReadySeconds should be set slightly higher than the flag value. + minReadySeconds: 35 + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: snapshot-controller + spec: + serviceAccountName: snapshot-controller + containers: + - name: snapshot-controller + image: registry.k8s.io/sig-storage/snapshot-controller:v6.3.1 + args: + - "--v=5" + - "--leader-election=true" + # Add a marker to the snapshot-controller manifests. This is needed to enable feature gates in CSI prow jobs. + # For example, in https://github.com/kubernetes-csi/csi-release-tools/pull/209, the snapshot-controller YAML is updated to add --prevent-volume-mode-conversion=true so that the feature can be enabled for certain e2e tests. 
+ # end snapshot controller args + imagePullPolicy: IfNotPresent `) -func deployExampleInfraclusterKubeconfigYamlBytes() ([]byte, error) { - return _deployExampleInfraclusterKubeconfigYaml, nil +func deployTenantBaseSetupSnapshotControllerYamlBytes() ([]byte, error) { + return _deployTenantBaseSetupSnapshotControllerYaml, nil } -func deployExampleInfraclusterKubeconfigYaml() (*asset, error) { - bytes, err := deployExampleInfraclusterKubeconfigYamlBytes() +func deployTenantBaseSetupSnapshotControllerYaml() (*asset, error) { + bytes, err := deployTenantBaseSetupSnapshotControllerYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "deploy/example/infracluster-kubeconfig.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "deploy/tenant/base/setup-snapshot-controller.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _deployExampleKubevirtConfigYaml = []byte(`apiVersion: v1 -kind: ConfigMap +var _deployTenantBaseSnapshotStorageK8sIo_volumesnapshotclassesYaml = []byte(`--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: - name: kubevirt-config - namespace: kubevirt -data: - default-network-interface: masquerade - feature-gates: DataVolumes,SRIOV,LiveMigration,CPUManager,CPUNodeDiscovery,Sidecar,Snapshot,HotplugVolumes - selinuxLauncherType: virt_launcher.process - smbios: |- - Family: KubeVirt - Manufacturer: KubeVirt - Product: None + annotations: + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/814" + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + shortNames: + - vsclass + - vsclasses + singular: volumesnapshotclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the + VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. + Required. 
+ enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} + - additionalPrinterColumns: + - jsonPath: .driver + name: Driver + type: string + - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .deletionPolicy + name: DeletionPolicy + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass" + schema: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes. 
+ type: object + required: + - deletionPolicy + - driver + type: object + served: false + storage: false + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] `) -func deployExampleKubevirtConfigYamlBytes() ([]byte, error) { - return _deployExampleKubevirtConfigYaml, nil +func deployTenantBaseSnapshotStorageK8sIo_volumesnapshotclassesYamlBytes() ([]byte, error) { + return _deployTenantBaseSnapshotStorageK8sIo_volumesnapshotclassesYaml, nil } -func deployExampleKubevirtConfigYaml() (*asset, error) { - bytes, err := deployExampleKubevirtConfigYamlBytes() +func deployTenantBaseSnapshotStorageK8sIo_volumesnapshotclassesYaml() (*asset, error) { + bytes, err := deployTenantBaseSnapshotStorageK8sIo_volumesnapshotclassesYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "deploy/example/kubevirt-config.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "deploy/tenant/base/snapshot.storage.k8s.io_volumesnapshotclasses.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _deployExampleKubevirtYaml = []byte(`apiVersion: kubevirt.io/v1alpha3 -kind: KubeVirt +var _deployTenantBaseSnapshotStorageK8sIo_volumesnapshotcontentsYaml = []byte(`--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: - name: kubevirt - namespace: kubevirt + annotations: + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/955" + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io spec: - certificateRotateStrategy: {} - configuration: - developerConfiguration: - featureGates: - - DataVolumes - - SRIOV - - LiveMigration - - CPUManager - - CPUNodeDiscovery - - Sidecar - - Snapshot - - HotplugVolumes - network: - defaultNetworkInterface: masquerade - smbios: - family: KubeVirt - manufacturer: KubeVirt - product: None - selinuxLauncherType: virt_launcher.process - customizeComponents: {} - uninstallStrategy: BlockUninstallIfWorkloadsExist + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + shortNames: + - vsc + - vscs + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. 
+ jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] + sourceVolumeMode: + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. 
+ type: string + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command ` + "`" + `date + +%s%N` + "`" + ` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. 
+ format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. + type: string + volumeGroupSnapshotHandle: + description: VolumeGroupSnapshotHandle is the CSI "group_snapshot_id" + of a group snapshot on the underlying storage system. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. 
+ jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. + jsonPath: .spec.volumeSnapshotRef.namespace + name: VolumeSnapshotNamespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent" + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required. + type: string + source: + description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable. 
+ type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command ` + "`" + `date +%s%N` + "`" + ` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the last observed error during snapshot creation, if any. 
Upon success after retry, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress. 
+ type: string + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] `) -func deployExampleKubevirtYamlBytes() ([]byte, error) { - return _deployExampleKubevirtYaml, nil +func deployTenantBaseSnapshotStorageK8sIo_volumesnapshotcontentsYamlBytes() ([]byte, error) { + return _deployTenantBaseSnapshotStorageK8sIo_volumesnapshotcontentsYaml, nil } -func deployExampleKubevirtYaml() (*asset, error) { - bytes, err := deployExampleKubevirtYamlBytes() +func deployTenantBaseSnapshotStorageK8sIo_volumesnapshotcontentsYaml() (*asset, error) { + bytes, err := deployTenantBaseSnapshotStorageK8sIo_volumesnapshotcontentsYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "deploy/example/kubevirt.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "deploy/tenant/base/snapshot.storage.k8s.io_volumesnapshotcontents.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _deployExampleStorageClaimYaml = []byte(`kind: PersistentVolumeClaim -apiVersion: v1 +var _deployTenantBaseSnapshotStorageK8sIo_volumesnapshotsYaml = []byte(`--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: - name: 1g-kubevirt-disk + annotations: + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/814" + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io spec: - storageClassName: kubevirt - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + shortNames: + - vs + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of + the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing + VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from + this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot + object intends to bind to. Please note that verification of binding actually + requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure + both are pointing at each other. Binding MUST be verified prior to usage of + this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying + storage system. 
+ jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. + This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. + type: string + type: object + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. + Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. 
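+          # A sketch of that bidirectional check, using the shortNames defined
+          # in these CRDs (vs = VolumeSnapshot, vsc = VolumeSnapshotContent):
+          #   vs.status.boundVolumeSnapshotContentName == vsc.metadata.name
+          #   vsc.spec.volumeSnapshotRef.name/namespace == vs.metadata.name/namespace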
+ properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. + For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. 
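+            # The pattern below is the standard Kubernetes quantity syntax;
+            # illustrative values such as "1Gi" or "1073741824" both match.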
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + volumeGroupSnapshotName: + description: VolumeGroupSnapshotName is the name of the VolumeGroupSnapshot + of which this VolumeSnapshot is a part of. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicates if the snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the minimum size of volume required to rehydrate from this snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + # This indicates the v1beta1 version of the custom resource is deprecated. + # API requests to this version receive a warning in the server response. + deprecated: true + # This overrides the default warning returned to clients making v1beta1 API requests. + deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot" + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.' 
+ properties: + source: + description: source specifies where a snapshot will be created from. This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.' + type: string + required: + - source + type: object + status: + description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object. + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. 
The snapshot controller will keep retrying when an error occurs during the snapshot creation. Upon success, this error field will be cleared. + properties: + message: + description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] `) -func deployExampleStorageClaimYamlBytes() ([]byte, error) { - return _deployExampleStorageClaimYaml, nil +func deployTenantBaseSnapshotStorageK8sIo_volumesnapshotsYamlBytes() ([]byte, error) { + return _deployTenantBaseSnapshotStorageK8sIo_volumesnapshotsYaml, nil } -func deployExampleStorageClaimYaml() (*asset, error) { - bytes, err := deployExampleStorageClaimYamlBytes() +func deployTenantBaseSnapshotStorageK8sIo_volumesnapshotsYaml() (*asset, error) { + bytes, err := deployTenantBaseSnapshotStorageK8sIo_volumesnapshotsYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "deploy/example/storage-claim.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "deploy/tenant/base/snapshot.storage.k8s.io_volumesnapshots.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _deployExampleStorageclassYaml = []byte(`apiVersion: storage.k8s.io/v1 -kind: StorageClass +var _deployTenantDevOverlayInfraNamespaceConfigmapYaml = []byte(`apiVersion: v1 +kind: ConfigMap metadata: - name: kubevirt - annotations: - storageclass.kubernetes.io/is-default-class: "true" -provisioner: csi.kubevirt.io -parameters: - infraStorageClassName: standard - bus: scsi + name: driver-config + namespace: kubevirt-csi-driver +data: + infraClusterNamespace: kvcluster + infraClusterLabels: csi-driver/cluster=tenant `) -func 
deployExampleStorageclassYamlBytes() ([]byte, error) { - return _deployExampleStorageclassYaml, nil +func deployTenantDevOverlayInfraNamespaceConfigmapYamlBytes() ([]byte, error) { + return _deployTenantDevOverlayInfraNamespaceConfigmapYaml, nil } -func deployExampleStorageclassYaml() (*asset, error) { - bytes, err := deployExampleStorageclassYamlBytes() +func deployTenantDevOverlayInfraNamespaceConfigmapYaml() (*asset, error) { + bytes, err := deployTenantDevOverlayInfraNamespaceConfigmapYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "deploy/example/storageclass.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "deploy/tenant/dev-overlay/infra-namespace-configmap.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _deployExampleTestPodYaml = []byte(`apiVersion: v1 -kind: Pod -metadata: - name: testpodwithcsi -spec: - containers: - - image: busybox - name: testpodwithcsi - command: ["sh", "-c", "while true; do ls -la /opt; echo this file system was made availble using kubevirt-csi-driver; mktmp /opt/test-XXXXXX; sleep 1m; done"] - imagePullPolicy: Always - volumeMounts: - - name: pv0002 - mountPath: "/opt" - volumes: - - name: pv0002 - persistentVolumeClaim: - claimName: 1g-kubevirt-disk +var _deployTenantDevOverlayKustomizationYaml = []byte(`resources: +- ../base +namespace: kubevirt-csi-driver +patches: +- path: infra-namespace-configmap.yaml +- path: node.yaml +- path: storageclass.yaml `) -func deployExampleTestPodYamlBytes() ([]byte, error) { - return _deployExampleTestPodYaml, nil +func deployTenantDevOverlayKustomizationYamlBytes() ([]byte, error) { + return _deployTenantDevOverlayKustomizationYaml, nil } -func deployExampleTestPodYaml() (*asset, error) { - bytes, err := deployExampleTestPodYamlBytes() +func deployTenantDevOverlayKustomizationYaml() (*asset, error) { + bytes, err := deployTenantDevOverlayKustomizationYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "deploy/example/test-pod.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "deploy/tenant/dev-overlay/kustomization.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _deployInfraClusterServiceAccountYaml = []byte(`apiVersion: v1 -kind: ServiceAccount -metadata: - name: kubevirt-csi ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: kubevirt-csi -rules: -- apiGroups: ["cdi.kubevirt.io"] - resources: ["datavolumes"] - verbs: ["get", "create", "delete"] -- apiGroups: ["kubevirt.io"] - resources: ["virtualmachineinstances"] - verbs: ["list"] -- apiGroups: ["subresources.kubevirt.io"] - resources: ["virtualmachineinstances/addvolume", "virtualmachineinstances/removevolume"] - verbs: ["update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding +var _deployTenantDevOverlayNodeYaml = []byte(`kind: DaemonSet +apiVersion: apps/v1 metadata: - name: kubevirt-csi -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kubevirt-csi -subjects: -- kind: ServiceAccount - name: kubevirt-csi ----`) + name: kubevirt-csi-node + namespace: kubevirt-csi-driver +spec: + template: + spec: + containers: + - name: csi-driver + image: 192.168.66.2:5000/kubevirt-csi-driver:latest +`) -func deployInfraClusterServiceAccountYamlBytes() ([]byte, error) { - return _deployInfraClusterServiceAccountYaml, nil 
+func deployTenantDevOverlayNodeYamlBytes() ([]byte, error) { + return _deployTenantDevOverlayNodeYaml, nil } -func deployInfraClusterServiceAccountYaml() (*asset, error) { - bytes, err := deployInfraClusterServiceAccountYamlBytes() +func deployTenantDevOverlayNodeYaml() (*asset, error) { + bytes, err := deployTenantDevOverlayNodeYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "deploy/infra-cluster-service-account.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "deploy/tenant/dev-overlay/node.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _deploySecretYaml = []byte(`apiVersion: v1 -kind: Secret +var _deployTenantDevOverlayStorageclassYaml = []byte(`apiVersion: storage.k8s.io/v1 +kind: StorageClass metadata: - name: infra-cluster-credentials - namespace: kubevirt-csi-driver -data: - kubeconfig: `) + name: kubevirt + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: csi.kubevirt.io +parameters: + infraStorageClassName: rook-ceph-block + bus: scsi +`) -func deploySecretYamlBytes() ([]byte, error) { - return _deploySecretYaml, nil +func deployTenantDevOverlayStorageclassYamlBytes() ([]byte, error) { + return _deployTenantDevOverlayStorageclassYaml, nil } -func deploySecretYaml() (*asset, error) { - bytes, err := deploySecretYamlBytes() +func deployTenantDevOverlayStorageclassYaml() (*asset, error) { + bytes, err := deployTenantDevOverlayStorageclassYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "deploy/secret.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "deploy/tenant/dev-overlay/storageclass.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -905,31 +2471,46 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. 
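 // Lookups are keyed by asset path; a minimal usage sketch (the path is one of
 // the entries below, the variable names are illustrative):
 //
 //	f := _bindata["deploy/tenant/base/deploy.yaml"]
 //	a, err := f()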
var _bindata = map[string]func() (*asset, error){ - "deploy/000-csi-driver.yaml": deploy000CsiDriverYaml, - "deploy/000-namespace.yaml": deploy000NamespaceYaml, - "deploy/020-autorization.yaml": deploy020AutorizationYaml, - "deploy/030-node.yaml": deploy030NodeYaml, - "deploy/040-controller.yaml": deploy040ControllerYaml, - "deploy/configmap.yaml": deployConfigmapYaml, - "deploy/example/infracluster-kubeconfig.yaml": deployExampleInfraclusterKubeconfigYaml, - "deploy/example/kubevirt-config.yaml": deployExampleKubevirtConfigYaml, - "deploy/example/kubevirt.yaml": deployExampleKubevirtYaml, - "deploy/example/storage-claim.yaml": deployExampleStorageClaimYaml, - "deploy/example/storageclass.yaml": deployExampleStorageclassYaml, - "deploy/example/test-pod.yaml": deployExampleTestPodYaml, - "deploy/infra-cluster-service-account.yaml": deployInfraClusterServiceAccountYaml, - "deploy/secret.yaml": deploySecretYaml, + "deploy/controller-infra/base/deploy.yaml": deployControllerInfraBaseDeployYaml, + "deploy/controller-infra/base/kustomization.yaml": deployControllerInfraBaseKustomizationYaml, + "deploy/controller-infra/dev-overlay/controller.yaml": deployControllerInfraDevOverlayControllerYaml, + "deploy/controller-infra/dev-overlay/infra-namespace-configmap.yaml": deployControllerInfraDevOverlayInfraNamespaceConfigmapYaml, + "deploy/controller-infra/dev-overlay/kustomization.yaml": deployControllerInfraDevOverlayKustomizationYaml, + "deploy/controller-tenant/base/deploy.yaml": deployControllerTenantBaseDeployYaml, + "deploy/controller-tenant/base/kustomization.yaml": deployControllerTenantBaseKustomizationYaml, + "deploy/controller-tenant/dev-overlay/controller.yaml": deployControllerTenantDevOverlayControllerYaml, + "deploy/controller-tenant/dev-overlay/kustomization.yaml": deployControllerTenantDevOverlayKustomizationYaml, + "deploy/example/infracluster-kubeconfig.yaml": deployExampleInfraclusterKubeconfigYaml, + "deploy/example/kubevirt-config.yaml": deployExampleKubevirtConfigYaml, + "deploy/example/kubevirt.yaml": deployExampleKubevirtYaml, + "deploy/example/storage-claim.yaml": deployExampleStorageClaimYaml, + "deploy/example/storageclass.yaml": deployExampleStorageclassYaml, + "deploy/example/test-pod.yaml": deployExampleTestPodYaml, + "deploy/infra-cluster-service-account.yaml": deployInfraClusterServiceAccountYaml, + "deploy/tenant/base/deploy.yaml": deployTenantBaseDeployYaml, + "deploy/tenant/base/kustomization.yaml": deployTenantBaseKustomizationYaml, + "deploy/tenant/base/rbac-snapshot-controller.yaml": deployTenantBaseRbacSnapshotControllerYaml, + "deploy/tenant/base/setup-snapshot-controller.yaml": deployTenantBaseSetupSnapshotControllerYaml, + "deploy/tenant/base/snapshot.storage.k8s.io_volumesnapshotclasses.yaml": deployTenantBaseSnapshotStorageK8sIo_volumesnapshotclassesYaml, + "deploy/tenant/base/snapshot.storage.k8s.io_volumesnapshotcontents.yaml": deployTenantBaseSnapshotStorageK8sIo_volumesnapshotcontentsYaml, + "deploy/tenant/base/snapshot.storage.k8s.io_volumesnapshots.yaml": deployTenantBaseSnapshotStorageK8sIo_volumesnapshotsYaml, + "deploy/tenant/dev-overlay/infra-namespace-configmap.yaml": deployTenantDevOverlayInfraNamespaceConfigmapYaml, + "deploy/tenant/dev-overlay/kustomization.yaml": deployTenantDevOverlayKustomizationYaml, + "deploy/tenant/dev-overlay/node.yaml": deployTenantDevOverlayNodeYaml, + "deploy/tenant/dev-overlay/storageclass.yaml": deployTenantDevOverlayStorageclassYaml, } // AssetDir returns the file names below a certain // directory embedded in the 
file by go-bindata. // For example if you run go-bindata on data/... and data contains the // following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png +// +// data/ +// foo.txt +// img/ +// a.png +// b.png +// // then AssetDir("data") would return []string{"foo.txt", "img"} // AssetDir("data/img") would return []string{"a.png", "b.png"} // AssetDir("foo.txt") and AssetDir("notexist") would return an error @@ -963,12 +2544,27 @@ type bintree struct { var _bintree = &bintree{nil, map[string]*bintree{ "deploy": {nil, map[string]*bintree{ - "000-csi-driver.yaml": {deploy000CsiDriverYaml, map[string]*bintree{}}, - "000-namespace.yaml": {deploy000NamespaceYaml, map[string]*bintree{}}, - "020-autorization.yaml": {deploy020AutorizationYaml, map[string]*bintree{}}, - "030-node.yaml": {deploy030NodeYaml, map[string]*bintree{}}, - "040-controller.yaml": {deploy040ControllerYaml, map[string]*bintree{}}, - "configmap.yaml": {deployConfigmapYaml, map[string]*bintree{}}, + "controller-infra": {nil, map[string]*bintree{ + "base": {nil, map[string]*bintree{ + "deploy.yaml": {deployControllerInfraBaseDeployYaml, map[string]*bintree{}}, + "kustomization.yaml": {deployControllerInfraBaseKustomizationYaml, map[string]*bintree{}}, + }}, + "dev-overlay": {nil, map[string]*bintree{ + "controller.yaml": {deployControllerInfraDevOverlayControllerYaml, map[string]*bintree{}}, + "infra-namespace-configmap.yaml": {deployControllerInfraDevOverlayInfraNamespaceConfigmapYaml, map[string]*bintree{}}, + "kustomization.yaml": {deployControllerInfraDevOverlayKustomizationYaml, map[string]*bintree{}}, + }}, + }}, + "controller-tenant": {nil, map[string]*bintree{ + "base": {nil, map[string]*bintree{ + "deploy.yaml": {deployControllerTenantBaseDeployYaml, map[string]*bintree{}}, + "kustomization.yaml": {deployControllerTenantBaseKustomizationYaml, map[string]*bintree{}}, + }}, + "dev-overlay": {nil, map[string]*bintree{ + "controller.yaml": {deployControllerTenantDevOverlayControllerYaml, map[string]*bintree{}}, + "kustomization.yaml": {deployControllerTenantDevOverlayKustomizationYaml, map[string]*bintree{}}, + }}, + }}, "example": {nil, map[string]*bintree{ "infracluster-kubeconfig.yaml": {deployExampleInfraclusterKubeconfigYaml, map[string]*bintree{}}, "kubevirt-config.yaml": {deployExampleKubevirtConfigYaml, map[string]*bintree{}}, @@ -978,7 +2574,23 @@ var _bintree = &bintree{nil, map[string]*bintree{ "test-pod.yaml": {deployExampleTestPodYaml, map[string]*bintree{}}, }}, "infra-cluster-service-account.yaml": {deployInfraClusterServiceAccountYaml, map[string]*bintree{}}, - "secret.yaml": {deploySecretYaml, map[string]*bintree{}}, + "tenant": {nil, map[string]*bintree{ + "base": {nil, map[string]*bintree{ + "deploy.yaml": {deployTenantBaseDeployYaml, map[string]*bintree{}}, + "kustomization.yaml": {deployTenantBaseKustomizationYaml, map[string]*bintree{}}, + "rbac-snapshot-controller.yaml": {deployTenantBaseRbacSnapshotControllerYaml, map[string]*bintree{}}, + "setup-snapshot-controller.yaml": {deployTenantBaseSetupSnapshotControllerYaml, map[string]*bintree{}}, + "snapshot.storage.k8s.io_volumesnapshotclasses.yaml": {deployTenantBaseSnapshotStorageK8sIo_volumesnapshotclassesYaml, map[string]*bintree{}}, + "snapshot.storage.k8s.io_volumesnapshotcontents.yaml": {deployTenantBaseSnapshotStorageK8sIo_volumesnapshotcontentsYaml, map[string]*bintree{}}, + "snapshot.storage.k8s.io_volumesnapshots.yaml": {deployTenantBaseSnapshotStorageK8sIo_volumesnapshotsYaml, map[string]*bintree{}}, + }}, + "dev-overlay": {nil, 
map[string]*bintree{ + "infra-namespace-configmap.yaml": {deployTenantDevOverlayInfraNamespaceConfigmapYaml, map[string]*bintree{}}, + "kustomization.yaml": {deployTenantDevOverlayKustomizationYaml, map[string]*bintree{}}, + "node.yaml": {deployTenantDevOverlayNodeYaml, map[string]*bintree{}}, + "storageclass.yaml": {deployTenantDevOverlayStorageclassYaml, map[string]*bintree{}}, + }}, + }}, }}, }} diff --git a/pkg/kubevirt/client.go b/pkg/kubevirt/client.go index dcb0770e..7fd4daa9 100644 --- a/pkg/kubevirt/client.go +++ b/pkg/kubevirt/client.go @@ -27,10 +27,22 @@ import ( ) const ( - vmiSubresourceURL = "/apis/subresources.kubevirt.io/%s/namespaces/%s/virtualmachineinstances/%s/%s" - annDefaultSnapshotClass = "snapshot.storage.kubernetes.io/is-default-class" + vmiSubresourceURL = "/apis/subresources.kubevirt.io/%s/namespaces/%s/virtualmachineinstances/%s/%s" + annDefaultSnapshotClass = "snapshot.storage.kubernetes.io/is-default-class" + InfraStorageClassNameParameter = "infraStorageClassName" + InfraSnapshotClassNameParameter = "infraSnapshotClassName" ) +type InfraTenantStorageSnapshotMapping struct { + VolumeSnapshotClasses []InfraToTenantMapping + StorageClasses []string +} + +type InfraToTenantMapping struct { + Infra string + Tenant string +} + //go:generate mockgen -source=./client.go -destination=./mock/client_generated.go -package=mock // ClientBuilderFuncType is function type for building infra-cluster clients @@ -56,30 +68,32 @@ type Client interface { } type client struct { - kubernetesClient kubernetes.Interface - virtClient kubecli.Interface - cdiClient cdicli.Interface - snapClient snapcli.Interface - restClient *rest.RESTClient - storageClassEnforcement util.StorageClassEnforcement - infraLabelMap map[string]string - volumePrefix string + infraKubernetesClient kubernetes.Interface + tenantKubernetesClient kubernetes.Interface + virtClient kubecli.Interface + cdiClient cdicli.Interface + infraSnapClient snapcli.Interface + restClient *rest.RESTClient + storageClassEnforcement util.StorageClassEnforcement + infraLabelMap map[string]string + volumePrefix string + infraTenantStorageSnapshotMapping []InfraTenantStorageSnapshotMapping } // NewClient New creates our client wrapper object for the actual kubeVirt and kubernetes clients we use. 
-func NewClient(config *rest.Config, infraClusterLabelMap map[string]string, storageClassEnforcement util.StorageClassEnforcement, prefix string) (Client, error) { +func NewClient(infraConfig *rest.Config, infraClusterLabelMap map[string]string, tenantKubernetesClient kubernetes.Interface, tenantSnapshotClient snapcli.Interface, storageClassEnforcement util.StorageClassEnforcement, prefix string) (Client, error) { result := &client{} Scheme := runtime.NewScheme() Codecs := serializer.NewCodecFactory(Scheme) - shallowCopy := *config + shallowCopy := *infraConfig shallowCopy.GroupVersion = &kubevirtv1.StorageGroupVersion shallowCopy.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: Codecs} shallowCopy.APIPath = "/apis" shallowCopy.ContentType = runtime.ContentTypeJSON - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() + if infraConfig.UserAgent == "" { + infraConfig.UserAgent = rest.DefaultKubernetesUserAgent() } restClient, err := rest.RESTClientFor(&shallowCopy) @@ -87,20 +101,20 @@ func NewClient(config *rest.Config, infraClusterLabelMap map[string]string, stor return nil, err } - clientset, err := kubernetes.NewForConfig(config) + clientset, err := kubernetes.NewForConfig(infraConfig) if err != nil { return nil, err } - result.kubernetesClient = clientset - kubevirtClient, err := kubecli.NewForConfig(config) + result.infraKubernetesClient = clientset + kubevirtClient, err := kubecli.NewForConfig(infraConfig) if err != nil { return nil, err } - cdiClient, err := cdicli.NewForConfig(config) + cdiClient, err := cdicli.NewForConfig(infraConfig) if err != nil { return nil, err } - snapClient, err := snapcli.NewForConfig(config) + snapClient, err := snapcli.NewForConfig(infraConfig) if err != nil { return nil, err } @@ -108,10 +122,16 @@ func NewClient(config *rest.Config, infraClusterLabelMap map[string]string, stor result.virtClient = kubevirtClient result.cdiClient = cdiClient result.restClient = restClient - result.snapClient = snapClient + result.infraSnapClient = snapClient result.infraLabelMap = infraClusterLabelMap result.volumePrefix = fmt.Sprintf("%s-", prefix) result.storageClassEnforcement = storageClassEnforcement + result.tenantKubernetesClient = tenantKubernetesClient + storageSnapshotMapping, err := result.buildStorageClassSnapshotClassMapping(tenantKubernetesClient, tenantSnapshotClient, storageClassEnforcement.StorageSnapshotMapping) + if err != nil { + return nil, err + } + result.infraTenantStorageSnapshotMapping = storageSnapshotMapping return result, nil } @@ -223,7 +243,7 @@ func (c *client) CreateDataVolume(ctx context.Context, namespace string, dataVol // Ping performs a minimal request to the infra-cluster k8s api func (c *client) Ping(ctx context.Context) error { - _, err := c.kubernetesClient.Discovery().ServerVersion() + _, err := c.infraKubernetesClient.Discovery().ServerVersion() return err } @@ -255,8 +275,8 @@ func (c *client) GetDataVolume(ctx context.Context, namespace string, name strin func (c *client) CreateVolumeSnapshot(ctx context.Context, namespace, name, claimName, snapshotClassName string) (*snapshotv1.VolumeSnapshot, error) { if dv, err := c.GetDataVolume(ctx, namespace, claimName); err != nil { return nil, err - } else if dv != nil { - snapshotClassName, err := c.getSnapshotClassNameFromVolumeClaimName(ctx, namespace, dv.GetName(), snapshotClassName) + } else { + snapshotClassNameFromStorage, err := c.getSnapshotClassNameFromVolumeClaimName(ctx, namespace, dv.GetName(), 
snapshotClassName) if err != nil { return nil, err } @@ -270,64 +290,81 @@ func (c *client) CreateVolumeSnapshot(ctx context.Context, namespace, name, clai Source: snapshotv1.VolumeSnapshotSource{ PersistentVolumeClaimName: &claimName, }, - VolumeSnapshotClassName: &snapshotClassName, }, } - klog.V(5).Infof("Creating snapshot %s with snapshot class %s, %#v", name, snapshotClassName, snapshot) - return c.snapClient.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, snapshot, metav1.CreateOptions{}) + // If the snapshot class is not found (blank), use the 'default snapshot class' for the infra cluster + // that is associated with the storage class provider. + if snapshotClassNameFromStorage != "" { + snapshot.Spec.VolumeSnapshotClassName = &snapshotClassNameFromStorage + } + klog.V(5).Infof("Creating snapshot %s with snapshot class [%s], %#v", name, snapshotClassName, snapshot) + return c.infraSnapClient.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, snapshot, metav1.CreateOptions{}) } - return nil, nil } func (c *client) getSnapshotClassNameFromVolumeClaimName(ctx context.Context, namespace, claimName, snapshotClassName string) (string, error) { storageClassName, err := c.getStorageClassNameFromClaimName(ctx, namespace, claimName) if err != nil { klog.V(2).Infof("Error getting storage class name for claim %s in namespace %s: %v", claimName, namespace, err) - return "", fmt.Errorf("unable to determine snapshot class name for infra source volume") - } - if storageClassName == "" { - return "", fmt.Errorf("unable to determine storage class name for snapshot creation") - } - allowed, err := c.isStorageClassAllowed(ctx, storageClassName) - if err != nil { - return "", err - } - if !allowed { - return "", fmt.Errorf("storage class %s is not allowed for snapshot creation", storageClassName) - } - snapshotClass, err := c.getSnapshotClassFromStorageClass(ctx, storageClassName, snapshotClassName) - if err != nil { - return "", err + return "", fmt.Errorf("unable to determine volume snapshot class name for infra source volume") + } + if storageClassName == "" && !c.storageClassEnforcement.AllowDefault { + return "", fmt.Errorf("unable to determine volume snapshot class name for snapshot creation, and default not allowed") + } else if storageClassName != "" && !(util.Contains(c.storageClassEnforcement.AllowList, storageClassName) || c.storageClassEnforcement.AllowAll) { + return "", fmt.Errorf("unable to determine volume snapshot class name for snapshot creation, no valid snapshot classes found") + } + snapshotClassNames := c.getInfraSnapshotClassesFromInfraStorageClassName(storageClassName) + if util.Contains(snapshotClassNames, snapshotClassName) { + return snapshotClassName, nil + } + if !(c.storageClassEnforcement.AllowAll || c.storageClassEnforcement.AllowDefault) { + tenantSnapshotClasses := c.getTenantSnapshotClassesFromInfraStorageClassName(storageClassName) + if len(tenantSnapshotClasses) > 0 { + if snapshotClassName == "" { + return "", fmt.Errorf("unable to determine volume snapshot class name for snapshot creation, valid snapshot classes are %v", tenantSnapshotClasses) + } else { + return "", fmt.Errorf("volume snapshot class %s is not compatible with PVC with storage class %s, valid snapshot classes for this pvc are %v", snapshotClassName, storageClassName, tenantSnapshotClasses) + } + } else { + return "", fmt.Errorf("unable to determine volume snapshot class name for snapshot creation, no valid snapshot classes found") + } } - return snapshotClass.Name, nil + return "", nil } -func (c
*client) isStorageClassAllowed(ctx context.Context, storageClassName string) (bool, error) { - if !c.storageClassEnforcement.AllowAll && !util.Contains(c.storageClassEnforcement.AllowList, storageClassName) { - if c.storageClassEnforcement.AllowDefault { - // Check if storage class is default and default is allowed. - storageClass, err := c.kubernetesClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - return false, nil - } else { - return false, err +func (c *client) getInfraSnapshotClassesFromInfraStorageClassName(storageClassName string) []string { + for _, storageSnapshotMapping := range c.infraTenantStorageSnapshotMapping { + for _, storageClass := range storageSnapshotMapping.StorageClasses { + if storageClassName == storageClass { + infraSnapshotClasses := []string{} + for _, snapshotClasses := range storageSnapshotMapping.VolumeSnapshotClasses { + infraSnapshotClasses = append(infraSnapshotClasses, snapshotClasses.Infra) } + return infraSnapshotClasses } - ann := storageClass.GetAnnotations() - if v, ok := ann["storageclass.kubernetes.io/is-default-class"]; !ok || v != "true" { - return false, nil + } + } + return nil +} + +func (c *client) getTenantSnapshotClassesFromInfraStorageClassName(storageClassName string) []string { + for _, storageSnapshotMapping := range c.infraTenantStorageSnapshotMapping { + for _, storageClass := range storageSnapshotMapping.StorageClasses { + if storageClassName == storageClass { + tenantSnapshotClasses := []string{} + for _, snapshotClasses := range storageSnapshotMapping.VolumeSnapshotClasses { + tenantSnapshotClasses = append(tenantSnapshotClasses, snapshotClasses.Tenant) + } + return tenantSnapshotClasses } - } else { - return false, nil } } - return true, nil + return nil } // Determine the name of the volume associated with the passed in claim name func (c *client) getStorageClassNameFromClaimName(ctx context.Context, namespace, claimName string) (string, error) { - volumeClaim, err := c.kubernetesClient.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, claimName, metav1.GetOptions{}) + volumeClaim, err := c.infraKubernetesClient.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, claimName, metav1.GetOptions{}) if err != nil { klog.Errorf("Error getting volume claim %s in namespace %s: %v", claimName, namespace, err) return "", err @@ -340,56 +377,8 @@ func (c *client) getStorageClassNameFromClaimName(ctx context.Context, namespace return storageClassName, nil } -// Get the associated snapshot class based on the storage class the following logic is used: -// 1. If the snapshot class is provided AND the provisioner string matches, return that. -// 2. If the snapshot class is empty, find the snapshot classes associated with provisioner string. -// 3. Based on those snapshot classes use the one marked as default if set. -// 4. If no default is set return the first one. 
-func (c *client) getSnapshotClassFromStorageClass(ctx context.Context, storageClassName, volumeSnapshotClassName string) (*snapshotv1.VolumeSnapshotClass, error) { - storageClass, err := c.kubernetesClient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) - if err != nil { - klog.V(2).Infof("Error getting storage class %s: %v", storageClassName, err) - return nil, err - } - provisioner := storageClass.Provisioner - snapshotClasses, err := c.snapClient.SnapshotV1().VolumeSnapshotClasses().List(ctx, metav1.ListOptions{}) - if errors.IsNotFound(err) { - klog.V(5).Info("No snapshot classes found") - return nil, nil - } else if err != nil { - klog.V(2).Infof("Error getting snapshot classes: %v", err) - return nil, err - } - var storageClassSnapshotClasses []snapshotv1.VolumeSnapshotClass - for _, snapshotClass := range snapshotClasses.Items { - if snapshotClass.Driver == provisioner { - storageClassSnapshotClasses = append(storageClassSnapshotClasses, snapshotClass) - } - } - - var bestMatch *snapshotv1.VolumeSnapshotClass - for i, snapshotClass := range storageClassSnapshotClasses { - klog.V(5).Infof("Checking snapshot class %#v", snapshotClass) - if i == 0 { - bestMatch = &storageClassSnapshotClasses[i] - } - if snapshotClass.Name == volumeSnapshotClassName { - return &snapshotClass, nil - } - ann := snapshotClass.GetAnnotations() - if ann != nil && ann[annDefaultSnapshotClass] == "true" { - bestMatch = &storageClassSnapshotClasses[i] - } - } - if volumeSnapshotClassName != "" { - klog.V(2).Infof("provided volume snapshot class %s cannot be matched with storage class", volumeSnapshotClassName) - return nil, fmt.Errorf("provided volume snapshot class cannot be matched with storage class") - } - return bestMatch, nil -} - func (c *client) GetVolumeSnapshot(ctx context.Context, namespace, name string) (*snapshotv1.VolumeSnapshot, error) { - s, err := c.snapClient.SnapshotV1().VolumeSnapshots(namespace).Get(ctx, name, metav1.GetOptions{}) + s, err := c.infraSnapClient.SnapshotV1().VolumeSnapshots(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -409,7 +398,7 @@ func (c *client) DeleteVolumeSnapshot(ctx context.Context, namespace, name strin if err != nil { return err } - return c.snapClient.SnapshotV1().VolumeSnapshots(s.GetNamespace()).Delete(ctx, s.GetName(), metav1.DeleteOptions{}) + return c.infraSnapClient.SnapshotV1().VolumeSnapshots(s.GetNamespace()).Delete(ctx, s.GetName(), metav1.DeleteOptions{}) } func (c *client) ListVolumeSnapshots(ctx context.Context, namespace string) (*snapshotv1.VolumeSnapshotList, error) { @@ -417,10 +406,51 @@ func (c *client) ListVolumeSnapshots(ctx context.Context, namespace string) (*sn if err != nil { return nil, err } - return c.snapClient.SnapshotV1().VolumeSnapshots(namespace).List(ctx, metav1.ListOptions{ + return c.infraSnapClient.SnapshotV1().VolumeSnapshots(namespace).List(ctx, metav1.ListOptions{ LabelSelector: sl.String(), }) } +func (c *client) buildStorageClassSnapshotClassMapping(k8sClient kubernetes.Interface, snapshotClient snapcli.Interface, infraStorageSnapMapping []util.StorageSnapshotMapping) ([]InfraTenantStorageSnapshotMapping, error) { + provisionerMapping := make([]InfraTenantStorageSnapshotMapping, len(infraStorageSnapMapping)) + + volumeSnapshotClassList, err := snapshotClient.SnapshotV1().VolumeSnapshotClasses().List(context.Background(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + + for i, storageSnapshotMapping := range infraStorageSnapMapping { + 
mapping := &InfraTenantStorageSnapshotMapping{ + StorageClasses: storageSnapshotMapping.StorageClasses, + } + mapping = appendVolumeSnapshotInfraTenantMapping(mapping, storageSnapshotMapping.VolumeSnapshotClasses, volumeSnapshotClassList.Items) + provisionerMapping[i] = *mapping + } + + return provisionerMapping, nil +} + +func appendVolumeSnapshotInfraTenantMapping(mapping *InfraTenantStorageSnapshotMapping, infraVolumeSnapshotClasses []string, tenantVolumeSnapshotClasses []snapshotv1.VolumeSnapshotClass) *InfraTenantStorageSnapshotMapping { + for _, infraVolumeSnapshotClass := range infraVolumeSnapshotClasses { + tenantVolumeSnapshotClassName := "" + for _, tenantVolumeSnapshotClass := range tenantVolumeSnapshotClasses { + if infraVolumeSnapshotClassName, ok := tenantVolumeSnapshotClass.Parameters[InfraSnapshotClassNameParameter]; !ok { + klog.V(4).Infof("volume snapshot class %s does not have infraSnapshotClassName parameter", tenantVolumeSnapshotClass.Name) + continue + } else { + if infraVolumeSnapshotClassName == infraVolumeSnapshotClass { + tenantVolumeSnapshotClassName = tenantVolumeSnapshotClass.Name + break + } + } + } + mapping.VolumeSnapshotClasses = append(mapping.VolumeSnapshotClasses, InfraToTenantMapping{ + Infra: infraVolumeSnapshotClass, + Tenant: tenantVolumeSnapshotClassName, + }) + } + return mapping +} + var ErrInvalidSnapshot = goerrors.New("invalid snapshot name") var ErrInvalidVolume = goerrors.New("invalid volume name") diff --git a/pkg/kubevirt/client_test.go b/pkg/kubevirt/client_test.go index 0ea08eb6..8cb35999 100644 --- a/pkg/kubevirt/client_test.go +++ b/pkg/kubevirt/client_test.go @@ -16,28 +16,34 @@ import ( "k8s.io/utils/ptr" cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" cdicli "kubevirt.io/csi-driver/pkg/generated/containerized-data-importer/client-go/clientset/versioned/fake" + snapcli "kubevirt.io/csi-driver/pkg/generated/external-snapshotter/client-go/clientset/versioned" snapfake "kubevirt.io/csi-driver/pkg/generated/external-snapshotter/client-go/clientset/versioned/fake" "kubevirt.io/csi-driver/pkg/util" ) const ( - defaultStorageClassName = "default-storage-class" - storageClassName = "test-storage-class" - volumeSnapshotClassName = "test-volume-snapshot-class" - provisioner = "test-provisioner" - nonMatchingProvisioner = "non-matching-provisioner-snapshot-class" - otherprovisioner = "other-provisioner" - otherVolumeSnapshotClassName = "other-volume-snapshot-class" - testVolumeName = "test-volume" - testVolumeNameNotAllowed = "test-volume-not-allowed" - validDataVolume = "pvc-valid-data-volume" - nolabelDataVolume = "nolabel-data-volume" - testClaimName = "pvc-valid-data-volume" - testClaimName2 = "pvc-valid-data-volume2" - testClaimName3 = "pvc-valid-data-volume3" - testClaimName4 = "pvc-default-storage-class" - testNamespace = "test-namespace" - unboundTestClaimName = "unbound-test-claim" + defaultStorageClassName = "default-storage-class" + tenantStorageClassName = "tenant-storage-class" + storageClassName = "test-storage-class" + tenantVolumeSnapshotClassName = "tenant-volume-snapshot-class" + volumeSnapshotClassName = "test-volume-snapshot-class" + provisioner = "test-provisioner" + nonMatchingProvisioner = "non-matching-provisioner-snapshot-class" + otherprovisioner = "other-provisioner" + otherVolumeSnapshotClassName = "other-volume-snapshot-class" + testVolumeName = "test-volume" + testVolumeNameNotAllowed = "test-volume-not-allowed" + validDataVolume = "pvc-valid-data-volume" + nolabelDataVolume = 
"nolabel-data-volume" + testClaimName = "pvc-valid-data-volume" + testClaimName2 = "pvc-valid-data-volume2" + testClaimNameNotAllowed = "pvc-valid-data-volume3" + testClaimNameDefault = "pvc-default-storage-class" + testNamespace = "test-namespace" + unboundTestClaimName = "unbound-test-claim" + snapshotClassNotFoundNoDefault = "unable to determine volume snapshot class name for snapshot creation, and default not allowed" + snapshotClassNotFound = "unable to determine volume snapshot class name for snapshot creation, no valid snapshot classes found" + snapshotClassNotFoundSuggestion = "volume snapshot class other-volume-snapshot-class is not compatible with PVC with storage class test-storage-class, valid snapshot classes for this pvc are [tenant-volume-snapshot-class]" ) var _ = Describe("Client", func() { @@ -108,21 +114,6 @@ var _ = Describe("Client", func() { c = NewFakeClient() }) - DescribeTable("should return volume snapshot class or error", func(storageClassName, volumeSnapshotClassName, resultSnapshotClassName string, expectedError bool) { - res, err := c.getSnapshotClassFromStorageClass(context.TODO(), storageClassName, volumeSnapshotClassName) - if expectedError { - Expect(err).To(HaveOccurred()) - } else { - Expect(err).ToNot(HaveOccurred()) - Expect(res.Name).To(Equal(resultSnapshotClassName)) - } - }, - Entry("should return volume snapshot class", storageClassName, volumeSnapshotClassName, volumeSnapshotClassName, false), - Entry("should return default snapshot class", storageClassName, "", otherVolumeSnapshotClassName, false), - Entry("should return error with non existing storage class", "non-existing-storage-class", "", "", true), - Entry("should return error when provider doesn't match", storageClassName, nonMatchingProvisioner, "", true), - ) - It("storage class from claim should return a storage class name", func() { storageClassName, err := c.getStorageClassNameFromClaimName(context.TODO(), testNamespace, testClaimName) Expect(err).ToNot(HaveOccurred()) @@ -135,13 +126,19 @@ var _ = Describe("Client", func() { Expect(volumeName).To(Equal("")) }) - It("snapshot class from claim name should return error if claim has nil storage class", func() { - volumeName, err := c.getSnapshotClassNameFromVolumeClaimName(context.TODO(), testNamespace, testClaimName4, volumeSnapshotClassName) + It("snapshot class from claim name should return error if claim has nil storage class, and not allow default", func() { + c.storageClassEnforcement.AllowDefault = false + volumeName, err := c.getSnapshotClassNameFromVolumeClaimName(context.TODO(), testNamespace, testClaimNameDefault, volumeSnapshotClassName) Expect(err).To(HaveOccurred()) Expect(volumeName).To(Equal("")) }) DescribeTable("should return snapshot class from claim or error", func(claimName, namespace, snapshotClassName, resultSnapshotClassName string, expectedError bool) { + c.storageClassEnforcement = createDefaultStorageClassEnforcement() + fakeTenantSnapClient := snapfake.NewSimpleClientset() + mapping, err := c.buildStorageClassSnapshotClassMapping(c.tenantKubernetesClient, fakeTenantSnapClient, c.storageClassEnforcement.StorageSnapshotMapping) + Expect(err).ToNot(HaveOccurred()) + c.infraTenantStorageSnapshotMapping = mapping res, err := c.getSnapshotClassNameFromVolumeClaimName(context.TODO(), namespace, claimName, snapshotClassName) if expectedError { Expect(err).To(HaveOccurred()) @@ -156,13 +153,13 @@ var _ = Describe("Client", func() { ) It("should return error if the storage class is not allowed", func() { - res, err := 
c.getSnapshotClassNameFromVolumeClaimName(context.TODO(), testNamespace, testClaimName3, volumeSnapshotClassName) + res, err := c.getSnapshotClassNameFromVolumeClaimName(context.TODO(), testNamespace, testClaimNameNotAllowed, volumeSnapshotClassName) Expect(err).To(HaveOccurred()) Expect(res).To(Equal("")) - Expect(err.Error()).To(ContainSubstring("not allowed for snapshot creation")) + Expect(err.Error()).To(ContainSubstring(snapshotClassNotFound)) }) - It("should return error if the storage class is not allowed", func() { + It("should not return error if the storage class is not allowed, but allowAll is true", func() { c.storageClassEnforcement.AllowAll = true c.storageClassEnforcement.AllowList = nil _, err := c.getSnapshotClassNameFromVolumeClaimName(context.TODO(), testNamespace, testClaimName, volumeSnapshotClassName) @@ -177,10 +174,27 @@ }) It("should return error if the volume snapshot class is not found", func() { + c.storageClassEnforcement.AllowDefault = false s, err := c.CreateVolumeSnapshot(context.TODO(), testNamespace, "snap", validDataVolume, "non-existing-snapshot-class") Expect(err).To(HaveOccurred()) Expect(s).To(BeNil()) - Expect(err.Error()).To(ContainSubstring("provided volume snapshot class cannot be matched with storage class")) + Expect(err.Error()).To(ContainSubstring(snapshotClassNotFound)) }) + + It("should return error if the volume snapshot class is not found, and passed in value is empty, and allowDefault = false", func() { + c.storageClassEnforcement.AllowDefault = false + s, err := c.CreateVolumeSnapshot(context.TODO(), testNamespace, "snap", validDataVolume, "") + Expect(err).To(HaveOccurred()) + Expect(s).To(BeNil()) + Expect(err.Error()).To(ContainSubstring(snapshotClassNotFound)) + }) + + It("should return nil error with snapshot if the volume snapshot class is not found, and passed in value is empty, and allowDefault = true", func() { + c.storageClassEnforcement.AllowDefault = true + s, err := c.CreateVolumeSnapshot(context.TODO(), testNamespace, "snap", validDataVolume, "") + Expect(err).ToNot(HaveOccurred()) + Expect(s).ToNot(BeNil()) + Expect(s.Spec.VolumeSnapshotClassName).To(BeNil()) }) It("should return error if the DV is not found", func() { @@ -191,6 +205,11 @@ }) It("should delete volumesnapshot if it exists and it valid", func() { + c.storageClassEnforcement = createDefaultStorageClassEnforcement() + fakeTenantSnapClient := snapfake.NewSimpleClientset() + mapping, err := c.buildStorageClassSnapshotClassMapping(c.tenantKubernetesClient, fakeTenantSnapClient, c.storageClassEnforcement.StorageSnapshotMapping) + Expect(err).ToNot(HaveOccurred()) + c.infraTenantStorageSnapshotMapping = mapping s, err := c.CreateVolumeSnapshot(context.TODO(), testNamespace, "snap", validDataVolume, volumeSnapshotClassName) Expect(err).ToNot(HaveOccurred()) Expect(s.Name).To(Equal("snap")) @@ -204,6 +223,11 @@ }) It("should return error if get volume returns an error", func() { + c.storageClassEnforcement = createDefaultStorageClassEnforcement() + fakeTenantSnapClient := snapfake.NewSimpleClientset() + mapping, err := c.buildStorageClassSnapshotClassMapping(c.tenantKubernetesClient, fakeTenantSnapClient, c.storageClassEnforcement.StorageSnapshotMapping) + Expect(err).ToNot(HaveOccurred()) + c.infraTenantStorageSnapshotMapping = mapping s, err := c.CreateVolumeSnapshot(context.TODO(), testNamespace, "snap", validDataVolume, volumeSnapshotClassName)
Expect(err).ToNot(HaveOccurred()) Expect(s.Name).To(Equal("snap")) @@ -213,6 +237,11 @@ var _ = Describe("Client", func() { }) It("should properly list snapshots", func() { + c.storageClassEnforcement = createDefaultStorageClassEnforcement() + fakeTenantSnapClient := snapfake.NewSimpleClientset() + mapping, err := c.buildStorageClassSnapshotClassMapping(c.tenantKubernetesClient, fakeTenantSnapClient, c.storageClassEnforcement.StorageSnapshotMapping) + Expect(err).ToNot(HaveOccurred()) + c.infraTenantStorageSnapshotMapping = mapping s, err := c.CreateVolumeSnapshot(context.TODO(), testNamespace, "snap", validDataVolume, volumeSnapshotClassName) Expect(err).ToNot(HaveOccurred()) Expect(s.Name).To(Equal("snap")) @@ -233,20 +262,55 @@ var _ = Describe("Client", func() { c = NewFakeClient() }) - DescribeTable("should properly calculate if storage class is allowed", func(storageClassName string, enforcement util.StorageClassEnforcement, expected bool) { + DescribeTable("should properly determine snapshot class from storage class", func(snapshotClassName, claimName string, enforcement util.StorageClassEnforcement, tenantSnapClient snapcli.Interface, expected, expectedError string) { c.storageClassEnforcement = enforcement - res, err := c.isStorageClassAllowed(context.TODO(), storageClassName) + mapping, err := c.buildStorageClassSnapshotClassMapping(c.tenantKubernetesClient, tenantSnapClient, c.storageClassEnforcement.StorageSnapshotMapping) Expect(err).ToNot(HaveOccurred()) + c.infraTenantStorageSnapshotMapping = mapping + res, err := c.getSnapshotClassNameFromVolumeClaimName(context.TODO(), testNamespace, claimName, snapshotClassName) + if expectedError != "" { + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring(expectedError)) + } else { + Expect(err).ToNot(HaveOccurred()) + } Expect(res).To(Equal(expected)) }, - Entry("should return true if storage class is in allowedList", storageClassName, - util.StorageClassEnforcement{AllowList: []string{storageClassName}}, true), - Entry("should return false if storage class is not in allowedList", storageClassName, - util.StorageClassEnforcement{AllowList: []string{}}, false), - Entry("should return true if default but not in allowedList", defaultStorageClassName, - util.StorageClassEnforcement{AllowList: []string{}, AllowDefault: true}, true), - Entry("should return false if not default and not in allowedList", storageClassName, - util.StorageClassEnforcement{AllowList: []string{}, AllowDefault: true}, false), + Entry("should return snapshot class if storage class is in allowedList", + volumeSnapshotClassName, + testClaimName, + createDefaultStorageClassEnforcement(), + snapfake.NewSimpleClientset(), + volumeSnapshotClassName, + ""), + Entry("should return blank if storage class is not in allowedList", + volumeSnapshotClassName, + testClaimNameNotAllowed, + createDefaultStorageClassEnforcement(), + snapfake.NewSimpleClientset(), + "", + snapshotClassNotFound), + Entry("should return blank and no error if AllowDefault but not in allowedList", + volumeSnapshotClassName, + testClaimNameDefault, + createAllowDefaultStorageClassEnforcement(), + snapfake.NewSimpleClientset(), + "", + ""), + Entry("should return error if not in allowedList", + volumeSnapshotClassName, + testClaimNameDefault, + createDefaultStorageClassEnforcement(), + snapfake.NewSimpleClientset(), + "", + snapshotClassNotFoundNoDefault), + Entry("should return error with suggestion if not in allowedList, but valid snapshot classes exist", + 
otherVolumeSnapshotClassName, + testClaimName, + createDefaultStorageClassEnforcement(), + snapfake.NewSimpleClientset(createTenantVolumeSnapshotClass(tenantVolumeSnapshotClassName, "csi.kubevirt.io", volumeSnapshotClassName), createVolumeSnapshotClass("no-parameter", "ceph.csi.io", false)), + "", + snapshotClassNotFoundSuggestion), ) }) @@ -265,8 +329,8 @@ func NewFakeClient() *client { testVolumeNotAllowed := createPersistentVolume(testVolumeNameNotAllowed, "not-allowed-storage-class") testClaim := createPersistentVolumeClaim(testClaimName, testVolumeName, ptr.To[string](storageClassName)) testClaim2 := createPersistentVolumeClaim(testClaimName2, "testVolumeName2", ptr.To[string](storageClassName)) - testClaim3 := createPersistentVolumeClaim(testClaimName3, testVolumeNameNotAllowed, ptr.To[string]("not-allowed-storage-class")) - testClaimDefault := createPersistentVolumeClaim(testClaimName4, testVolumeName, nil) + testClaim3 := createPersistentVolumeClaim(testClaimNameNotAllowed, testVolumeNameNotAllowed, ptr.To[string]("not-allowed-storage-class")) + testClaimDefault := createPersistentVolumeClaim(testClaimNameDefault, testVolumeName, nil) unboundClaim := &k8sv1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: unboundTestClaimName, @@ -279,16 +343,18 @@ func NewFakeClient() *client { fakeK8sClient := k8sfake.NewSimpleClientset(storageClass, defaultStorageClass, testVolume, testVolumeNotAllowed, testClaim, testClaim2, testClaim3, unboundClaim, testClaimDefault) + fakeTenantK8sClient := k8sfake.NewSimpleClientset(createTenantStorageClass(tenantStorageClassName, "csi.kubevirt.io", storageClassName), createStorageClass("no-parameter-storage-class", "test.io", false)) fakeSnapClient := snapfake.NewSimpleClientset( createVolumeSnapshotClass(volumeSnapshotClassName, provisioner, false), createVolumeSnapshotClass(nonMatchingProvisioner, otherprovisioner, false), createVolumeSnapshotClass(otherVolumeSnapshotClassName, provisioner, true), ) result := &client{ - kubernetesClient: fakeK8sClient, - snapClient: fakeSnapClient, - infraLabelMap: map[string]string{"test": "test"}, - volumePrefix: "pvc-", + infraKubernetesClient: fakeK8sClient, + tenantKubernetesClient: fakeTenantK8sClient, + infraSnapClient: fakeSnapClient, + infraLabelMap: map[string]string{"test": "test"}, + volumePrefix: "pvc-", storageClassEnforcement: util.StorageClassEnforcement{ AllowList: []string{storageClassName}, AllowAll: false, @@ -313,6 +379,19 @@ func createVolumeSnapshotClass(name, provisioner string, isDefault bool) *snapsh return res } +func createTenantVolumeSnapshotClass(name, provisioner, infraSnapshotClassName string) *snapshotv1.VolumeSnapshotClass { + res := &snapshotv1.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Driver: provisioner, + Parameters: map[string]string{ + InfraSnapshotClassNameParameter: infraSnapshotClassName, + }, + } + return res +} + func createPersistentVolume(name, storageClassName string) *k8sv1.PersistentVolume { return &k8sv1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ @@ -356,6 +435,19 @@ func createStorageClass(name, provisioner string, isDefault bool) *storagev1.Sto return res } +func createTenantStorageClass(name, provisioner, infraStorageClassName string) *storagev1.StorageClass { + res := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Provisioner: provisioner, + Parameters: map[string]string{ + InfraStorageClassNameParameter: infraStorageClassName, + }, + } + return res +} + func createDataVolume(name 
string, labels map[string]string) *cdiv1.DataVolume { return &cdiv1.DataVolume{ ObjectMeta: metav1.ObjectMeta{ @@ -378,3 +470,27 @@ func createNoLabelDataVolume() *cdiv1.DataVolume { func createWrongPrefixDataVolume() *cdiv1.DataVolume { return createDataVolume(testVolumeName, map[string]string{"test": "test"}) } + +func createDefaultStorageClassEnforcement() util.StorageClassEnforcement { + return util.StorageClassEnforcement{ + AllowList: []string{storageClassName}, + AllowAll: false, + StorageSnapshotMapping: []util.StorageSnapshotMapping{ + { + StorageClasses: []string{ + storageClassName, + }, + VolumeSnapshotClasses: []string{ + volumeSnapshotClassName, + }, + }, + }, + } +} + +func createAllowDefaultStorageClassEnforcement() util.StorageClassEnforcement { + return util.StorageClassEnforcement{ + AllowAll: false, + AllowDefault: true, + } +} diff --git a/pkg/service/controller.go b/pkg/service/controller.go index ede46c4e..5efba344 100644 --- a/pkg/service/controller.go +++ b/pkg/service/controller.go @@ -27,11 +27,9 @@ import ( ) const ( - infraStorageClassNameParameter = "infraStorageClassName" - infraSnapshotClassNameParameter = "infraSnapshotClassName" - busParameter = "bus" - busDefaultValue = kubevirtv1.DiskBus("scsi") - serialParameter = "serial" + busParameter = "bus" + busDefaultValue = kubevirtv1.DiskBus("scsi") + serialParameter = "serial" ) var ( @@ -77,7 +75,7 @@ func (c *ControllerService) validateCreateVolumeRequest(req *csi.CreateVolumeReq return isRWX, nil } - storageClassName := req.Parameters[infraStorageClassNameParameter] + storageClassName := req.Parameters[client.InfraStorageClassNameParameter] if storageClassName == "" { if c.storageClassEnforcement.AllowDefault { return isRWX, nil @@ -126,7 +124,7 @@ func (c *ControllerService) CreateVolume(ctx context.Context, req *csi.CreateVol } // Prepare parameters for the DataVolume - storageClassName := req.Parameters[infraStorageClassNameParameter] + storageClassName := req.Parameters[client.InfraStorageClassNameParameter] storageSize := req.GetCapacityRange().GetRequiredBytes() dvName := req.Name value, ok := req.Parameters[busParameter] @@ -552,7 +550,7 @@ func (c *ControllerService) CreateSnapshot(ctx context.Context, req *csi.CreateS return nil, status.Errorf(codes.NotFound, "source volume %s not found", req.GetSourceVolumeId()) } // Prepare parameters for the DataVolume - snapshotClassName := req.Parameters[infraSnapshotClassNameParameter] + snapshotClassName := req.Parameters[client.InfraSnapshotClassNameParameter] volumeSnapshot, err := c.virtClient.CreateVolumeSnapshot(ctx, c.infraClusterNamespace, req.GetName(), req.GetSourceVolumeId(), snapshotClassName) if err != nil { return nil, err diff --git a/pkg/service/controller_test.go b/pkg/service/controller_test.go index fb98364e..c43926a6 100644 --- a/pkg/service/controller_test.go +++ b/pkg/service/controller_test.go @@ -20,6 +20,7 @@ import ( kubevirtv1 "kubevirt.io/api/core/v1" cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" + client "kubevirt.io/csi-driver/pkg/kubevirt" "kubevirt.io/csi-driver/pkg/util" . 
"github.com/onsi/ginkgo/v2" @@ -116,16 +117,16 @@ var _ = Describe("CreateVolume", func() { }) It("should not allow storage class not in the allow list", func() { - client := &ControllerClientMock{} + cli := &ControllerClientMock{} storageClassEnforcement = util.StorageClassEnforcement{ AllowList: []string{"allowedClass"}, AllowAll: false, AllowDefault: true, } - controller := ControllerService{client, testInfraNamespace, testInfraLabels, storageClassEnforcement} + controller := ControllerService{cli, testInfraNamespace, testInfraLabels, storageClassEnforcement} request := getCreateVolumeRequest(getVolumeCapability(corev1.PersistentVolumeFilesystem, csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER)) - request.Parameters[infraStorageClassNameParameter] = "notAllowedClass" + request.Parameters[client.InfraStorageClassNameParameter] = "notAllowedClass" _, err := controller.CreateVolume(context.TODO(), request) Expect(err).To(HaveOccurred()) @@ -547,7 +548,7 @@ func getVolumeCapability(volumeMode corev1.PersistentVolumeMode, accessModes csi func getCreateVolumeRequest(volumeCapability *csi.VolumeCapability) *csi.CreateVolumeRequest { parameters := map[string]string{} if testInfraStorageClassName != "" { - parameters[infraStorageClassNameParameter] = testInfraStorageClassName + parameters[client.InfraStorageClassNameParameter] = testInfraStorageClassName } if testBusType != nil { parameters[busParameter] = string(*testBusType) diff --git a/pkg/service/identity.go b/pkg/service/identity.go index b9f1cc04..638b6d89 100644 --- a/pkg/service/identity.go +++ b/pkg/service/identity.go @@ -28,12 +28,12 @@ func NewIdentityService(clientset kubernetes.Interface) *IdentityService { } } -//IdentityService of kubevirt-csi-driver +// IdentityService of kubevirt-csi-driver type IdentityService struct { connectivityProbe connectivityProbeInterface } -//GetPluginInfo returns the vendor name and version - set in build time +// GetPluginInfo returns the vendor name and version - set in build time func (i *IdentityService) GetPluginInfo(context.Context, *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { return &csi.GetPluginInfoResponse{ Name: VendorName, @@ -41,7 +41,7 @@ func (i *IdentityService) GetPluginInfo(context.Context, *csi.GetPluginInfoReque }, nil } -//GetPluginCapabilities declares the plugins capabilities +// GetPluginCapabilities declares the plugins capabilities func (i *IdentityService) GetPluginCapabilities(context.Context, *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { return &csi.GetPluginCapabilitiesResponse{ Capabilities: []*csi.PluginCapability{ diff --git a/pkg/util/util.go b/pkg/util/util.go index 1c956eaa..9bb52404 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -1,9 +1,15 @@ package util type StorageClassEnforcement struct { - AllowList []string `yaml:"allowList"` - AllowAll bool `yaml:"allowAll"` - AllowDefault bool `yaml:"allowDefault"` + AllowList []string `yaml:"allowList"` + AllowAll bool `yaml:"allowAll"` + AllowDefault bool `yaml:"allowDefault"` + StorageSnapshotMapping []StorageSnapshotMapping `yaml:"storageSnapshotMapping,omitempty"` +} + +type StorageSnapshotMapping struct { + VolumeSnapshotClasses []string `yaml:"volumeSnapshotClasses,omitempty"` + StorageClasses []string `yaml:"storageClasses"` } // Contains tells whether a contains x. 
diff --git a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go index a6f974d6..e2c54111 100644 --- a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go +++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go @@ -106,6 +106,9 @@ type StorageSpec struct { DataSourceRef *corev1.TypedObjectReference `json:"dataSourceRef,omitempty"` } +// PersistentVolumeFromStorageProfile means the volume mode will be auto selected by CDI according to a matching StorageProfile +const PersistentVolumeFromStorageProfile corev1.PersistentVolumeMode = "FromStorageProfile" + // DataVolumeCheckpoint defines a stage in a warm migration. type DataVolumeCheckpoint struct { // Previous is the identifier of the snapshot from the previous checkpoint. @@ -418,6 +421,7 @@ type StorageProfileSpec struct { // CloneStrategy defines the preferred method for performing a CDI clone CloneStrategy *CDICloneStrategy `json:"cloneStrategy,omitempty"` // ClaimPropertySets is a provided set of properties applicable to PVC + // +kubebuilder:validation:MaxItems=8 ClaimPropertySets []ClaimPropertySet `json:"claimPropertySets,omitempty"` // DataImportCronSourceFormat defines the format of the DataImportCron-created disk image sources DataImportCronSourceFormat *DataImportCronSourceFormat `json:"dataImportCronSourceFormat,omitempty"` @@ -434,6 +438,7 @@ type StorageProfileStatus struct { // CloneStrategy defines the preferred method for performing a CDI clone CloneStrategy *CDICloneStrategy `json:"cloneStrategy,omitempty"` // ClaimPropertySets computed from the spec and detected in the system + // +kubebuilder:validation:MaxItems=8 ClaimPropertySets []ClaimPropertySet `json:"claimPropertySets,omitempty"` // DataImportCronSourceFormat defines the format of the DataImportCron-created disk image sources DataImportCronSourceFormat *DataImportCronSourceFormat `json:"dataImportCronSourceFormat,omitempty"` @@ -445,12 +450,13 @@ type StorageProfileStatus struct { type ClaimPropertySet struct { // AccessModes contains the desired access modes the volume should have. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - // +optional - AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` + // +kubebuilder:validation:MaxItems=4 + // +kubebuilder:validation:XValidation:rule="self.all(am, am in ['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany', 'ReadWriteOncePod'])", message="Illegal AccessMode" + AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes"` // VolumeMode defines what type of volume is required by the claim. // Value of Filesystem is implied when not included in claim spec. 
- // +optional - VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` + // +kubebuilder:validation:Enum="Block";"Filesystem" + VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode"` } // StorageProfileList provides the needed parameters to request a list of StorageProfile from the system @@ -824,10 +830,11 @@ type CDISpec struct { // +kubebuilder:validation:Enum=RemoveWorkloads;BlockUninstallIfWorkloadsExist // CDIUninstallStrategy defines the state to leave CDI on uninstall UninstallStrategy *CDIUninstallStrategy `json:"uninstallStrategy,omitempty"` - // Rules on which nodes CDI infrastructure pods will be scheduled - Infra sdkapi.NodePlacement `json:"infra,omitempty"` + // Selectors and tolerations that should apply to cdi infrastructure components + Infra ComponentConfig `json:"infra,omitempty"` // Restrict on which nodes CDI workload pods will be scheduled - Workloads sdkapi.NodePlacement `json:"workload,omitempty"` + Workloads sdkapi.NodePlacement `json:"workload,omitempty"` + CustomizeComponents CustomizeComponents `json:"customizeComponents,omitempty"` // Clone strategy override: should we use a host-assisted copy even if snapshots are available? // +kubebuilder:validation:Enum="copy";"snapshot";"csi-clone" CloneStrategyOverride *CDICloneStrategy `json:"cloneStrategyOverride,omitempty"` @@ -839,6 +846,18 @@ type CDISpec struct { PriorityClass *CDIPriorityClass `json:"priorityClass,omitempty"` } +// ComponentConfig defines the scheduling and replicas configuration for CDI components +type ComponentConfig struct { + // NodePlacement describes scheduling configuration for specific CDI components + sdkapi.NodePlacement `json:",inline"` + // DeploymentReplicas set Replicas for cdi-deployment + DeploymentReplicas *int32 `json:"deploymentReplicas,omitempty"` + // ApiserverReplicas set Replicas for cdi-apiserver + APIServerReplicas *int32 `json:"apiServerReplicas,omitempty"` + // UploadproxyReplicas set Replicas for cdi-uploadproxy + UploadProxyReplicas *int32 `json:"uploadProxyReplicas,omitempty"` +} + // CDIPriorityClass defines the priority class of the CDI control plane. type CDIPriorityClass string @@ -856,6 +875,47 @@ const ( CloneStrategyCsiClone CDICloneStrategy = "csi-clone" ) +// CustomizeComponents defines patches for components deployed by the CDI operator. +type CustomizeComponents struct { + // +listType=atomic + Patches []CustomizeComponentsPatch `json:"patches,omitempty"` + + // Configure the value used for deployment and daemonset resources + Flags *Flags `json:"flags,omitempty"` +} + +// Flags will create a patch that will replace all flags for the container's +// command field. The only flags that will be used are those define. There are no +// guarantees around forward/backward compatibility. If set incorrectly this will +// cause the resource when rolled out to error until flags are updated. +type Flags struct { + API map[string]string `json:"api,omitempty"` + Controller map[string]string `json:"controller,omitempty"` + UploadProxy map[string]string `json:"uploadProxy,omitempty"` +} + +// CustomizeComponentsPatch defines a patch for some resource. +type CustomizeComponentsPatch struct { + // +kubebuilder:validation:MinLength=1 + ResourceName string `json:"resourceName"` + // +kubebuilder:validation:MinLength=1 + ResourceType string `json:"resourceType"` + Patch string `json:"patch"` + Type PatchType `json:"type"` +} + +// PatchType defines the patch type. 
+type PatchType string + +const ( + // JSONPatchType is a constant that represents the type of JSON patch. + JSONPatchType PatchType = "json" + // MergePatchType is a constant that represents the type of JSON Merge patch. + MergePatchType PatchType = "merge" + // StrategicMergePatchType is a constant that represents the type of Strategic Merge patch. + StrategicMergePatchType PatchType = "strategic" +) + // DataImportCronSourceFormat defines the format of the DataImportCron-created disk image sources type DataImportCronSourceFormat string diff --git a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go index c95c63c7..49246a91 100644 --- a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go +++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go @@ -182,7 +182,7 @@ func (StorageProfileSpec) SwaggerDoc() map[string]string { return map[string]string{ "": "StorageProfileSpec defines specification for StorageProfile", "cloneStrategy": "CloneStrategy defines the preferred method for performing a CDI clone", - "claimPropertySets": "ClaimPropertySets is a provided set of properties applicable to PVC", + "claimPropertySets": "ClaimPropertySets is a provided set of properties applicable to PVC\n+kubebuilder:validation:MaxItems=8", "dataImportCronSourceFormat": "DataImportCronSourceFormat defines the format of the DataImportCron-created disk image sources", "snapshotClass": "SnapshotClass is optional specific VolumeSnapshotClass for CloneStrategySnapshot. If not set, a VolumeSnapshotClass is chosen according to the provisioner.", } @@ -194,7 +194,7 @@ func (StorageProfileStatus) SwaggerDoc() map[string]string { "storageClass": "The StorageClass name for which capabilities are defined", "provisioner": "The Storage class provisioner plugin name", "cloneStrategy": "CloneStrategy defines the preferred method for performing a CDI clone", - "claimPropertySets": "ClaimPropertySets computed from the spec and detected in the system", + "claimPropertySets": "ClaimPropertySets computed from the spec and detected in the system\n+kubebuilder:validation:MaxItems=8", "dataImportCronSourceFormat": "DataImportCronSourceFormat defines the format of the DataImportCron-created disk image sources", "snapshotClass": "SnapshotClass is optional specific VolumeSnapshotClass for CloneStrategySnapshot. 
If not set, a VolumeSnapshotClass is chosen according to the provisioner.", } @@ -203,8 +203,8 @@ func (StorageProfileStatus) SwaggerDoc() map[string]string { func (ClaimPropertySet) SwaggerDoc() map[string]string { return map[string]string{ "": "ClaimPropertySet is a set of properties applicable to PVC", - "accessModes": "AccessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+optional", - "volumeMode": "VolumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec.\n+optional", + "accessModes": "AccessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+kubebuilder:validation:MaxItems=4\n+kubebuilder:validation:XValidation:rule=\"self.all(am, am in ['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany', 'ReadWriteOncePod'])\", message=\"Illegal AccessMode\"", + "volumeMode": "VolumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec.\n+kubebuilder:validation:Enum=\"Block\";\"Filesystem\"", } } @@ -428,7 +428,7 @@ func (CDISpec) SwaggerDoc() map[string]string { "": "CDISpec defines our specification for the CDI installation", "imagePullPolicy": "+kubebuilder:validation:Enum=Always;IfNotPresent;Never\nPullPolicy describes a policy for if/when to pull a container image", "uninstallStrategy": "+kubebuilder:validation:Enum=RemoveWorkloads;BlockUninstallIfWorkloadsExist\nCDIUninstallStrategy defines the state to leave CDI on uninstall", - "infra": "Rules on which nodes CDI infrastructure pods will be scheduled", + "infra": "Selectors and tolerations that should apply to cdi infrastructure components", "workload": "Restrict on which nodes CDI workload pods will be scheduled", "cloneStrategyOverride": "Clone strategy override: should we use a host-assisted copy even if snapshots are available?\n+kubebuilder:validation:Enum=\"copy\";\"snapshot\";\"csi-clone\"", "config": "CDIConfig at CDI level", @@ -437,6 +437,37 @@ func (CDISpec) SwaggerDoc() map[string]string { } } +func (ComponentConfig) SwaggerDoc() map[string]string { + return map[string]string{ + "": "ComponentConfig defines the scheduling and replicas configuration for CDI components", + "deploymentReplicas": "DeploymentReplicas set Replicas for cdi-deployment", + "apiServerReplicas": "ApiserverReplicas set Replicas for cdi-apiserver", + "uploadProxyReplicas": "UploadproxyReplicas set Replicas for cdi-uploadproxy", + } +} + +func (CustomizeComponents) SwaggerDoc() map[string]string { + return map[string]string{ + "": "CustomizeComponents defines patches for components deployed by the CDI operator.", + "patches": "+listType=atomic", + "flags": "Configure the value used for deployment and daemonset resources", + } +} + +func (Flags) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Flags will create a patch that will replace all flags for the container's\ncommand field. The only flags that will be used are those define. There are no\nguarantees around forward/backward compatibility. 
If set incorrectly this will\ncause the resource when rolled out to error until flags are updated.", + } +} + +func (CustomizeComponentsPatch) SwaggerDoc() map[string]string { + return map[string]string{ + "": "CustomizeComponentsPatch defines a patch for some resource.", + "resourceName": "+kubebuilder:validation:MinLength=1", + "resourceType": "+kubebuilder:validation:MinLength=1", + } +} + func (CDIStatus) SwaggerDoc() map[string]string { return map[string]string{ "": "CDIStatus defines the status of the installation", diff --git a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/zz_generated.deepcopy.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/zz_generated.deepcopy.go index 39d84a69..30b665b7 100644 --- a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/zz_generated.deepcopy.go +++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/zz_generated.deepcopy.go @@ -303,6 +303,7 @@ func (in *CDISpec) DeepCopyInto(out *CDISpec) { } in.Infra.DeepCopyInto(&out.Infra) in.Workloads.DeepCopyInto(&out.Workloads) + in.CustomizeComponents.DeepCopyInto(&out.CustomizeComponents) if in.CloneStrategyOverride != nil { in, out := &in.CloneStrategyOverride, &out.CloneStrategyOverride *out = new(CDICloneStrategy) @@ -405,6 +406,38 @@ func (in *ClaimPropertySet) DeepCopy() *ClaimPropertySet { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentConfig) DeepCopyInto(out *ComponentConfig) { + *out = *in + in.NodePlacement.DeepCopyInto(&out.NodePlacement) + if in.DeploymentReplicas != nil { + in, out := &in.DeploymentReplicas, &out.DeploymentReplicas + *out = new(int32) + **out = **in + } + if in.APIServerReplicas != nil { + in, out := &in.APIServerReplicas, &out.APIServerReplicas + *out = new(int32) + **out = **in + } + if in.UploadProxyReplicas != nil { + in, out := &in.UploadProxyReplicas, &out.UploadProxyReplicas + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentConfig. +func (in *ComponentConfig) DeepCopy() *ComponentConfig { + if in == nil { + return nil + } + out := new(ComponentConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConditionState) DeepCopyInto(out *ConditionState) { *out = *in @@ -423,6 +456,48 @@ func (in *ConditionState) DeepCopy() *ConditionState { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizeComponents) DeepCopyInto(out *CustomizeComponents) { + *out = *in + if in.Patches != nil { + in, out := &in.Patches, &out.Patches + *out = make([]CustomizeComponentsPatch, len(*in)) + copy(*out, *in) + } + if in.Flags != nil { + in, out := &in.Flags, &out.Flags + *out = new(Flags) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizeComponents. +func (in *CustomizeComponents) DeepCopy() *CustomizeComponents { + if in == nil { + return nil + } + out := new(CustomizeComponents) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizeComponentsPatch) DeepCopyInto(out *CustomizeComponentsPatch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizeComponentsPatch. +func (in *CustomizeComponentsPatch) DeepCopy() *CustomizeComponentsPatch { + if in == nil { + return nil + } + out := new(CustomizeComponentsPatch) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DataImportCron) DeepCopyInto(out *DataImportCron) { *out = *in @@ -1193,6 +1268,43 @@ func (in *FilesystemOverhead) DeepCopy() *FilesystemOverhead { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Flags) DeepCopyInto(out *Flags) { + *out = *in + if in.API != nil { + in, out := &in.API, &out.API + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.UploadProxy != nil { + in, out := &in.UploadProxy, &out.UploadProxy + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Flags. +func (in *Flags) DeepCopy() *Flags { + if in == nil { + return nil + } + out := new(Flags) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImportProxy) DeepCopyInto(out *ImportProxy) { *out = *in diff --git a/vendor/modules.txt b/vendor/modules.txt index 614643d1..ff646114 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -653,8 +653,8 @@ k8s.io/utils/strings/slices ## explicit; go 1.17 kubevirt.io/api/core kubevirt.io/api/core/v1 -# kubevirt.io/containerized-data-importer-api v1.58.1 -## explicit; go 1.19 +# kubevirt.io/containerized-data-importer-api v1.59.0 +## explicit; go 1.21 kubevirt.io/containerized-data-importer-api/pkg/apis/core kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1 # kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90
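Setting aside the generated vendor churn, the heart of the feature is the infra-to-tenant lookup performed by `appendVolumeSnapshotInfraTenantMapping`: each tenant `VolumeSnapshotClass` advertises its infra counterpart through the `infraSnapshotClassName` parameter, and the driver inverts that into the infra/tenant table it consults when suggesting valid snapshot classes. A self-contained sketch of that lookup, using a pared-down stand-in for `snapshotv1.VolumeSnapshotClass` and hypothetical class names:

```go
package main

import "fmt"

// Matches the constant added to pkg/kubevirt/client.go.
const InfraSnapshotClassNameParameter = "infraSnapshotClassName"

// volumeSnapshotClass is a pared-down stand-in for snapshotv1.VolumeSnapshotClass,
// keeping only the fields this lookup touches.
type volumeSnapshotClass struct {
	Name       string
	Parameters map[string]string
}

// tenantNameFor mirrors the inner loop of appendVolumeSnapshotInfraTenantMapping:
// find the tenant class whose infraSnapshotClassName parameter names the given
// infra class. An empty result means the infra class has no tenant counterpart.
func tenantNameFor(infraClass string, tenantClasses []volumeSnapshotClass) string {
	for _, tc := range tenantClasses {
		// Indexing a nil Parameters map is safe in Go and yields "", so classes
		// without the parameter are skipped, as in the patch's klog.V(4) branch.
		if tc.Parameters[InfraSnapshotClassNameParameter] == infraClass {
			return tc.Name
		}
	}
	return ""
}

func main() {
	// Hypothetical tenant-side snapshot classes.
	tenantClasses := []volumeSnapshotClass{
		{Name: "tenant-snap-a", Parameters: map[string]string{InfraSnapshotClassNameParameter: "snapshot_class_a"}},
		{Name: "no-parameter"},
	}
	fmt.Println(tenantNameFor("snapshot_class_a", tenantClasses)) // tenant-snap-a
	fmt.Println(tenantNameFor("snapshot_class_b", tenantClasses)) // "" (unmapped)
}
```

Tenant classes lacking the parameter are simply skipped, so an infra snapshot class with no tenant counterpart resolves to an empty tenant name in the mapping table.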