From 891b6956ee51e500f4b16f156244af99a0e6c4b4 Mon Sep 17 00:00:00 2001 From: Bella Khizgiyaev Date: Wed, 13 Sep 2023 16:53:26 +0300 Subject: [PATCH 1/5] OVA: changing nfs mount to server and conversion pod to use shared PVC Signed-off-by: Bella Khizgiyaev --- pkg/controller/plan/kubevirt.go | 120 ++++++++-- pkg/controller/provider/BUILD.bazel | 2 + .../provider/container/ova/client.go | 2 +- pkg/controller/provider/controller.go | 147 +------------ pkg/controller/provider/ova-setup.go | 205 ++++++++++++++++++ 5 files changed, 316 insertions(+), 160 deletions(-) create mode 100644 pkg/controller/provider/ova-setup.go diff --git a/pkg/controller/plan/kubevirt.go b/pkg/controller/plan/kubevirt.go index a2129d994..3656928af 100644 --- a/pkg/controller/plan/kubevirt.go +++ b/pkg/controller/plan/kubevirt.go @@ -32,6 +32,7 @@ import ( libcnd "github.com/konveyor/forklift-controller/pkg/lib/condition" liberr "github.com/konveyor/forklift-controller/pkg/lib/error" core "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" k8serr "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -1336,23 +1337,19 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi switch r.Source.Provider.Type() { case api.Ova: - server := r.Source.Provider.Spec.URL - splitted := strings.Split(server, ":") - - if len(splitted) != 2 { - r.Log.Info("The NFS server path format is wrong") - return + pvcName := fmt.Sprintf("ova-server-pvc-%s-%s", r.Source.Provider.Name, r.Plan.Name) + // If the provider runs in a different namespace then the destination VM, + // we need to create the PV and PVC to access the NFS. + if r.Plan.Spec.TargetNamespace != r.Source.Provider.Namespace { + r.CreatePvcForNfs(pvcName) } - nfsServer := splitted[0] - nfsPath := splitted[1] //path from disk volumes = append(volumes, core.Volume{ - Name: "nfs", + Name: "nfs-pvc", VolumeSource: core.VolumeSource{ - NFS: &core.NFSVolumeSource{ - Server: nfsServer, - Path: nfsPath, + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvcName, }, }, }) @@ -1366,7 +1363,7 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi MountPath: "/opt", }, core.VolumeMount{ - Name: "nfs", + Name: "nfs-pvc", MountPath: "/ova", }, ) @@ -1835,6 +1832,103 @@ func (r *KubeVirt) EnsurePersistentVolume(vmRef ref.Ref, persistentVolumes []cor return } +func (r *KubeVirt) CreatePvcForNfs(pvcName string) (err error) { + //TODO: set ownerReferenceso the PV and PVC deleted once the plan is done. 
+ // ownerReference := meta.OwnerReference{ + // APIVersion: "forklift.konveyor.io/v1beta1", + // Kind: "Plan", + // Name: r.Plan.Name, + // UID: r.Plan.UID, + // } + + sourceProvider := r.Source.Provider + pvName := fmt.Sprintf("ova-server-pv-%s", r.Plan.Name) + splitted := strings.Split(sourceProvider.Spec.URL, ":") + + if len(splitted) != 2 { + r.Log.Error(nil, "NFS server path doesn't contains :") + } + nfsServer := splitted[0] + nfsPath := splitted[1] + + pv := &v1.PersistentVolume{ + ObjectMeta: meta.ObjectMeta{ + Name: pvName, + }, + Spec: v1.PersistentVolumeSpec{ + Capacity: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse("1Gi"), + }, + AccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadOnlyMany, + }, + PersistentVolumeSource: v1.PersistentVolumeSource{ + NFS: &v1.NFSVolumeSource{ + Path: nfsPath, + Server: nfsServer, + }, + }, + }, + } + err = r.Create(context.TODO(), pv) + if err != nil { + r.Log.Error(err, "Failed to create OVA plan PV") + return + } + + sc := "" + pvc := &v1.PersistentVolumeClaim{ + ObjectMeta: meta.ObjectMeta{ + Name: pvcName, + Namespace: r.Plan.Spec.TargetNamespace, + }, + Spec: v1.PersistentVolumeClaimSpec{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + AccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadOnlyMany, + }, + VolumeName: pvName, + StorageClassName: &sc, + }, + } + err = r.Create(context.TODO(), pvc) + if err != nil { + r.Log.Error(err, "Failed to create OVA plan PVC") + return + } + + // wait until pvc and pv are bounded. + timeout := time.After(5 * time.Minute) + tick := time.Tick(5 * time.Second) + pvcNamespacedName := types.NamespacedName{ + Namespace: r.Plan.Spec.TargetNamespace, + Name: pvcName, + } + + for { + select { + case <-timeout: + r.Log.Error(err, "Timed out waiting for PVC to be bound") + return + case <-tick: + err = r.Get(context.TODO(), pvcNamespacedName, pvc) + fmt.Print("this is pvc: ", pvc) + if err != nil { + r.Log.Error(err, "Failed to bound OVA plan PVC") + return + } + + if pvc.Status.Phase == "Bound" { + return + } + } + } +} + // Ensure the PV exist on the destination. 
func (r *KubeVirt) EnsurePersistentVolumeClaim(vmRef ref.Ref, persistentVolumeClaims []core.PersistentVolumeClaim) (err error) { list, err := r.getPVCs(vmRef) diff --git a/pkg/controller/provider/BUILD.bazel b/pkg/controller/provider/BUILD.bazel index 348974097..b5065cb1d 100644 --- a/pkg/controller/provider/BUILD.bazel +++ b/pkg/controller/provider/BUILD.bazel @@ -4,6 +4,7 @@ go_library( name = "provider", srcs = [ "controller.go", + "ova-setup.go", "predicate.go", "validation.go", ], @@ -30,6 +31,7 @@ go_library( "//vendor/k8s.io/api/apps/v1:apps", "//vendor/k8s.io/api/core/v1:core", "//vendor/k8s.io/apimachinery/pkg/api/errors", + "//vendor/k8s.io/apimachinery/pkg/api/resource", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:meta", "//vendor/k8s.io/apimachinery/pkg/util/intstr", "//vendor/k8s.io/apiserver/pkg/storage/names", diff --git a/pkg/controller/provider/container/ova/client.go b/pkg/controller/provider/container/ova/client.go index a5f440fc5..6d4728bec 100644 --- a/pkg/controller/provider/container/ova/client.go +++ b/pkg/controller/provider/container/ova/client.go @@ -48,7 +48,7 @@ func (r *Client) Connect(provider *api.Provider) (err error) { }, } - serverURL := fmt.Sprintf("http://ova-service-%s:8080", provider.Name) + serverURL := fmt.Sprintf("http://ova-service-%s.%s.svc.cluster.local:8080", provider.Name, provider.Namespace) if serverURL == "" { return } diff --git a/pkg/controller/provider/controller.go b/pkg/controller/provider/controller.go index 84034adee..073c01e7a 100644 --- a/pkg/controller/provider/controller.go +++ b/pkg/controller/provider/controller.go @@ -21,7 +21,6 @@ import ( "fmt" "os" "path/filepath" - "strings" "sync" api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" @@ -42,8 +41,6 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" k8serr "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apiserver/pkg/storage/names" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -64,13 +61,6 @@ var log = logging.WithName(Name) // Application settings. var Settings = &settings.Settings -const ( - ovaServerPrefix = "ova-server" - ovaImageVar = "OVA_PROVIDER_SERVER_IMAGE" - nfsVolumeNamePrefix = "nfs-volume" - mountPath = "/ova" -) - // Creates a new Inventory Controller and adds it to the Manager. 
func Add(mgr manager.Manager) error { libfb.WorkingDir = Settings.WorkingDir @@ -207,7 +197,7 @@ func (r Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (r // If the deployment does not exist if k8serr.IsNotFound(err) { - r.createOVAServerDeployment(provider, ctx) + r.CreateOVAServerDeployment(provider, ctx) } else if err != nil { return } @@ -364,141 +354,6 @@ func (r *Reconciler) getSecret(provider *api.Provider) (*v1.Secret, error) { return secret, nil } -func (r *Reconciler) createOVAServerDeployment(provider *api.Provider, ctx context.Context) { - - deploymentName := fmt.Sprintf("%s-deployment-%s", ovaServerPrefix, provider.Name) - annotations := make(map[string]string) - labels := map[string]string{"providerName": provider.Name, "app": "forklift"} - url := provider.Spec.URL - var replicas int32 = 1 - - ownerReference := metav1.OwnerReference{ - APIVersion: "forklift.konveyor.io/v1beta1", - Kind: "Provider", - Name: provider.Name, - UID: provider.UID, - } - - //OVA server deployment - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, - Namespace: provider.Namespace, - Annotations: annotations, - Labels: labels, - OwnerReferences: []metav1.OwnerReference{ownerReference}, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": "forklift", - }, - }, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "providerName": provider.Name, - "app": "forklift", - }, - }, - Spec: r.makeOvaProviderPodSpec(url, string(provider.Name)), - }, - }, - } - - err := r.Create(ctx, deployment) - if err != nil { - r.Log.Error(err, "Failed to create OVA server deployment") - return - } - - // OVA Server Service - serviceName := fmt.Sprintf("ova-service-%s", provider.Name) - service := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: provider.Namespace, - Labels: labels, - OwnerReferences: []metav1.OwnerReference{ownerReference}, - }, - Spec: v1.ServiceSpec{ - Selector: map[string]string{ - "providerName": provider.Name, - "app": "forklift", - }, - Ports: []v1.ServicePort{ - { - Name: "api-http", - Protocol: v1.ProtocolTCP, - Port: 8080, - TargetPort: intstr.FromInt(8080), - }, - }, - Type: v1.ServiceTypeClusterIP, - }, - } - - err = r.Create(ctx, service) - if err != nil { - r.Log.Error(err, "Failed to create OVA server service") - return - } -} - -func (r *Reconciler) makeOvaProviderPodSpec(url string, providerName string) v1.PodSpec { - splitted := strings.Split(url, ":") - nonRoot := false - - if len(splitted) != 2 { - r.Log.Error(nil, "NFS server path doesn't contains :") - } - nfsServer := splitted[0] - nfsPath := splitted[1] - - imageName, ok := os.LookupEnv(ovaImageVar) - if !ok { - r.Log.Error(nil, "Failed to find OVA server image") - } - - nfsVolumeName := fmt.Sprintf("%s-%s", nfsVolumeNamePrefix, providerName) - - ovaContainerName := fmt.Sprintf("%s-pod-%s", ovaServerPrefix, providerName) - - return v1.PodSpec{ - - Containers: []v1.Container{ - { - Name: ovaContainerName, - Ports: []v1.ContainerPort{{ContainerPort: 8080, Protocol: v1.ProtocolTCP}}, - SecurityContext: &v1.SecurityContext{ - RunAsNonRoot: &nonRoot, - }, - Image: imageName, - VolumeMounts: []v1.VolumeMount{ - { - Name: nfsVolumeName, - MountPath: "/ova", - }, - }, - }, - }, - ServiceAccountName: "forklift-controller", - Volumes: []v1.Volume{ - { - Name: nfsVolumeName, - VolumeSource: v1.VolumeSource{ - NFS: 
&v1.NFSVolumeSource{ - Server: nfsServer, - Path: nfsPath, - ReadOnly: false, - }, - }, - }, - }, - } -} - // Provider catalog. type Catalog struct { mutex sync.Mutex diff --git a/pkg/controller/provider/ova-setup.go b/pkg/controller/provider/ova-setup.go new file mode 100644 index 000000000..294b87c5f --- /dev/null +++ b/pkg/controller/provider/ova-setup.go @@ -0,0 +1,205 @@ +package provider + +import ( + "context" + "fmt" + "os" + "strings" + + api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + ovaServerPrefix = "ova-server" + ovaImageVar = "OVA_PROVIDER_SERVER_IMAGE" + nfsVolumeNamePrefix = "nfs-volume" + mountPath = "/ova" + pvSize = "1Gi" +) + +func (r *Reconciler) CreateOVAServerDeployment(provider *api.Provider, ctx context.Context) { + + ownerReference := metav1.OwnerReference{ + APIVersion: "forklift.konveyor.io/v1beta1", + Kind: "Provider", + Name: provider.Name, + UID: provider.UID, + } + + pvName := fmt.Sprintf("%s-pv-%s", ovaServerPrefix, provider.Name) + splitted := strings.Split(provider.Spec.URL, ":") + + if len(splitted) != 2 { + r.Log.Error(nil, "NFS server path doesn't contains :") + } + nfsServer := splitted[0] + nfsPath := splitted[1] + + pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvName, + OwnerReferences: []metav1.OwnerReference{ownerReference}, + }, + Spec: v1.PersistentVolumeSpec{ + Capacity: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse("1Gi"), + }, + AccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadOnlyMany, + }, + PersistentVolumeSource: v1.PersistentVolumeSource{ + NFS: &v1.NFSVolumeSource{ + Path: nfsPath, + Server: nfsServer, + }, + }, + }, + } + err := r.Create(ctx, pv) + if err != nil { + r.Log.Error(err, "Failed to create OVA server PV") + return + } + + pvcName := fmt.Sprintf("%s-pvc-%s", ovaServerPrefix, provider.Name) + sc := "" + pvc := &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Namespace: provider.Namespace, + OwnerReferences: []metav1.OwnerReference{ownerReference}, + }, + Spec: v1.PersistentVolumeClaimSpec{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + AccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadOnlyMany, + }, + VolumeName: pvName, + StorageClassName: &sc, + }, + } + err = r.Create(ctx, pvc) + if err != nil { + r.Log.Error(err, "Failed to create OVA server PVC") + return + } + + deploymentName := fmt.Sprintf("%s-deployment-%s", ovaServerPrefix, provider.Name) + annotations := make(map[string]string) + labels := map[string]string{"providerName": provider.Name, "app": "forklift"} + var replicas int32 = 1 + + //OVA server deployment + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentName, + Namespace: provider.Namespace, + Annotations: annotations, + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ownerReference}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "forklift", + }, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "providerName": provider.Name, + "app": "forklift", + }, + }, + Spec: r.makeOvaProviderPodSpec(pvcName, string(provider.Name)), + }, + }, + } + + err = 
r.Create(ctx, deployment) + if err != nil { + r.Log.Error(err, "Failed to create OVA server deployment") + return + } + + // OVA Server Service + serviceName := fmt.Sprintf("ova-service-%s", provider.Name) + service := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: provider.Namespace, + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ownerReference}, + }, + Spec: v1.ServiceSpec{ + Selector: map[string]string{ + "providerName": provider.Name, + "app": "forklift", + }, + Ports: []v1.ServicePort{ + { + Name: "api-http", + Protocol: v1.ProtocolTCP, + Port: 8080, + TargetPort: intstr.FromInt(8080), + }, + }, + Type: v1.ServiceTypeClusterIP, + }, + } + + err = r.Create(ctx, service) + if err != nil { + r.Log.Error(err, "Failed to create OVA server service") + return + } +} + +func (r *Reconciler) makeOvaProviderPodSpec(pvcName string, providerName string) v1.PodSpec { + + imageName, ok := os.LookupEnv(ovaImageVar) + if !ok { + r.Log.Error(nil, "Failed to find OVA server image") + } + + nfsVolumeName := fmt.Sprintf("%s-%s", nfsVolumeNamePrefix, providerName) + + ovaContainerName := fmt.Sprintf("%s-pod-%s", ovaServerPrefix, providerName) + + return v1.PodSpec{ + + Containers: []v1.Container{ + { + Name: ovaContainerName, + Ports: []v1.ContainerPort{{ContainerPort: 8080, Protocol: v1.ProtocolTCP}}, + Image: imageName, + VolumeMounts: []v1.VolumeMount{ + { + Name: nfsVolumeName, + MountPath: "/ova", + }, + }, + }, + }, + Volumes: []v1.Volume{ + { + Name: nfsVolumeName, + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvcName, + }, + }, + }, + }, + } +} From 520baedfc710a2ac3925c1fce0f1b8c59057b828 Mon Sep 17 00:00:00 2001 From: Liran Rotenberg Date: Tue, 26 Sep 2023 10:55:56 +0300 Subject: [PATCH 2/5] Refactoring the OVA NFS Signed-off-by: Liran Rotenberg --- pkg/controller/plan/kubevirt.go | 157 ++++++++++++++++++++------- pkg/controller/provider/ova-setup.go | 133 +++++++++++++---------- 2 files changed, 191 insertions(+), 99 deletions(-) diff --git a/pkg/controller/plan/kubevirt.go b/pkg/controller/plan/kubevirt.go index 3656928af..b0dc61945 100644 --- a/pkg/controller/plan/kubevirt.go +++ b/pkg/controller/plan/kubevirt.go @@ -32,7 +32,6 @@ import ( libcnd "github.com/konveyor/forklift-controller/pkg/lib/condition" liberr "github.com/konveyor/forklift-controller/pkg/lib/error" core "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" k8serr "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -1185,7 +1184,10 @@ func (r *KubeVirt) findTemplate(vm *plan.VMStatus) (tmpl *template.Template, err } func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs *[]core.PersistentVolumeClaim, v2vSecret *core.Secret) (pod *core.Pod, err error) { - volumes, volumeMounts, volumeDevices := r.podVolumeMounts(vmVolumes, configMap, pvcs) + volumes, volumeMounts, volumeDevices, err := r.podVolumeMounts(vmVolumes, configMap, pvcs) + if err != nil { + return + } // qemu group fsGroup := qemuGroup @@ -1291,7 +1293,7 @@ func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, return } -func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs *[]core.PersistentVolumeClaim) (volumes []core.Volume, mounts []core.VolumeMount, devices []core.VolumeDevice) { +func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs 
*[]core.PersistentVolumeClaim) (volumes []core.Volume, mounts []core.VolumeMount, devices []core.VolumeDevice, err error) { pvcsByName := make(map[string]core.PersistentVolumeClaim) for _, pvc := range *pvcs { pvcsByName[pvc.Name] = pvc @@ -1337,18 +1339,32 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi switch r.Source.Provider.Type() { case api.Ova: - pvcName := fmt.Sprintf("ova-server-pvc-%s-%s", r.Source.Provider.Name, r.Plan.Name) - // If the provider runs in a different namespace then the destination VM, - // we need to create the PV and PVC to access the NFS. - if r.Plan.Spec.TargetNamespace != r.Source.Provider.Namespace { - r.CreatePvcForNfs(pvcName) + pvcName := fmt.Sprintf("ova-nfs-pvc-%s-%s", r.Source.Provider.Name, r.Plan.Name) + /* + TODO: set ownerReferences the PV and PVC deleted once the plan is done. + cross namespaces are not possible to be used for plan/provider ownership. + optional: PV+PVC per VM, deleted via cleanup. ownership: importer pod? DV? + ownerReference := meta.OwnerReference{ + APIVersion: "forklift.konveyor.io/v1beta1", + Kind: "Plan", + Name: r.Plan.Name, + UID: r.Plan.UID, + } + */ + err = r.CreatePvForNfs() + if err != nil { + return + } + err = r.CreatePvcForNfs(pvcName) + if err != nil { + return } //path from disk volumes = append(volumes, core.Volume{ - Name: "nfs-pvc", + Name: "nfs-pv", VolumeSource: core.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ ClaimName: pvcName, }, }, @@ -1363,7 +1379,7 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi MountPath: "/opt", }, core.VolumeMount{ - Name: "nfs-pvc", + Name: "nfs-pv", MountPath: "/ova", }, ) @@ -1832,70 +1848,129 @@ func (r *KubeVirt) EnsurePersistentVolume(vmRef ref.Ref, persistentVolumes []cor return } -func (r *KubeVirt) CreatePvcForNfs(pvcName string) (err error) { - //TODO: set ownerReferenceso the PV and PVC deleted once the plan is done. 
- // ownerReference := meta.OwnerReference{ - // APIVersion: "forklift.konveyor.io/v1beta1", - // Kind: "Plan", - // Name: r.Plan.Name, - // UID: r.Plan.UID, - // } +func (r *KubeVirt) getOvaPvNfs() (found bool, err error) { + pv := &core.PersistentVolume{} + err = r.Destination.Client.Get( + context.TODO(), + types.NamespacedName{ + Name: fmt.Sprintf("ova-nfs-pv-%s", r.Plan.Name), + }, + pv, + ) + + if err != nil { + if k8serr.IsNotFound(err) { + return false, nil + } + err = liberr.Wrap(err) + return + } + return true, nil +} +func (r *KubeVirt) getOvaPvcNfs() (found bool, err error) { + pvc := &core.PersistentVolumeClaim{} + err = r.Destination.Client.Get( + context.TODO(), + types.NamespacedName{ + Name: fmt.Sprintf("ova-nfs-pvc-%s-%s", r.Source.Provider.Name, r.Plan.Name), + Namespace: r.Plan.Spec.TargetNamespace, + }, + pvc, + ) + + if err != nil { + if k8serr.IsNotFound(err) { + return false, nil + } + err = liberr.Wrap(err) + return + } + return true, nil +} + +// TODO if we use ownership get it to the method +func (r *KubeVirt) CreatePvForNfs() (err error) { sourceProvider := r.Source.Provider - pvName := fmt.Sprintf("ova-server-pv-%s", r.Plan.Name) splitted := strings.Split(sourceProvider.Spec.URL, ":") - if len(splitted) != 2 { - r.Log.Error(nil, "NFS server path doesn't contains :") + r.Log.Error(nil, "NFS server path doesn't contains: ", "url", sourceProvider.Spec.URL) + return fmt.Errorf("bad source provider %s URL %s", sourceProvider.Name, sourceProvider.Spec.URL) } nfsServer := splitted[0] nfsPath := splitted[1] - pv := &v1.PersistentVolume{ + pvName := fmt.Sprintf("ova-nfs-pv-%s", r.Plan.Name) + found, err := r.getOvaPvNfs() + if err != nil { + return + } + if found { + r.Log.Info("The PV for OVA NFS exists", "PV", pvName) + return + } + + pv := &core.PersistentVolume{ ObjectMeta: meta.ObjectMeta{ Name: pvName, + // OwnerReferences: []meta.OwnerReference{ownerReference}, }, - Spec: v1.PersistentVolumeSpec{ - Capacity: v1.ResourceList{ - v1.ResourceStorage: resource.MustParse("1Gi"), + Spec: core.PersistentVolumeSpec{ + Capacity: core.ResourceList{ + core.ResourceStorage: resource.MustParse("1Gi"), }, - AccessModes: []v1.PersistentVolumeAccessMode{ - v1.ReadOnlyMany, + AccessModes: []core.PersistentVolumeAccessMode{ + core.ReadOnlyMany, }, - PersistentVolumeSource: v1.PersistentVolumeSource{ - NFS: &v1.NFSVolumeSource{ + PersistentVolumeSource: core.PersistentVolumeSource{ + NFS: &core.NFSVolumeSource{ Path: nfsPath, Server: nfsServer, }, }, }, } - err = r.Create(context.TODO(), pv) + err = r.Destination.Create(context.TODO(), pv) if err != nil { r.Log.Error(err, "Failed to create OVA plan PV") return } + return +} + +// TODO if we use ownership get it to the method +func (r *KubeVirt) CreatePvcForNfs(pvcName string) (err error) { + found, err := r.getOvaPvcNfs() + if err != nil { + return + } + if found { + r.Log.Info("The PVC for OVA NFS exists", "PVC", pvcName) + return + } sc := "" - pvc := &v1.PersistentVolumeClaim{ + pvName := fmt.Sprintf("ova-nfs-pv-%s", r.Plan.Name) + pvc := &core.PersistentVolumeClaim{ ObjectMeta: meta.ObjectMeta{ Name: pvcName, Namespace: r.Plan.Spec.TargetNamespace, + // OwnerReferences: []meta.OwnerReference{ownerReference}, }, - Spec: v1.PersistentVolumeClaimSpec{ - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceStorage: resource.MustParse("1Gi"), + Spec: core.PersistentVolumeClaimSpec{ + Resources: core.ResourceRequirements{ + Requests: core.ResourceList{ + core.ResourceStorage: resource.MustParse("1Gi"), }, }, - 
AccessModes: []v1.PersistentVolumeAccessMode{ - v1.ReadOnlyMany, + AccessModes: []core.PersistentVolumeAccessMode{ + core.ReadOnlyMany, }, VolumeName: pvName, StorageClassName: &sc, }, } - err = r.Create(context.TODO(), pvc) + err = r.Destination.Create(context.TODO(), pvc) if err != nil { r.Log.Error(err, "Failed to create OVA plan PVC") return @@ -1913,15 +1988,13 @@ func (r *KubeVirt) CreatePvcForNfs(pvcName string) (err error) { select { case <-timeout: r.Log.Error(err, "Timed out waiting for PVC to be bound") - return + return fmt.Errorf("timeout passed waiting for the OVA PVC %v", pvc) case <-tick: err = r.Get(context.TODO(), pvcNamespacedName, pvc) - fmt.Print("this is pvc: ", pvc) if err != nil { r.Log.Error(err, "Failed to bound OVA plan PVC") return } - if pvc.Status.Phase == "Bound" { return } diff --git a/pkg/controller/provider/ova-setup.go b/pkg/controller/provider/ova-setup.go index 294b87c5f..6933cd823 100644 --- a/pkg/controller/provider/ova-setup.go +++ b/pkg/controller/provider/ova-setup.go @@ -8,7 +8,7 @@ import ( api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" + core "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -22,66 +22,93 @@ const ( pvSize = "1Gi" ) -func (r *Reconciler) CreateOVAServerDeployment(provider *api.Provider, ctx context.Context) { - +func (r Reconciler) CreateOVAServerDeployment(provider *api.Provider, ctx context.Context) { ownerReference := metav1.OwnerReference{ APIVersion: "forklift.konveyor.io/v1beta1", Kind: "Provider", Name: provider.Name, UID: provider.UID, } - pvName := fmt.Sprintf("%s-pv-%s", ovaServerPrefix, provider.Name) - splitted := strings.Split(provider.Spec.URL, ":") + err := r.createPvForNfs(provider, ctx, ownerReference, pvName) + if err != nil { + r.Log.Error(err, "Failed to create NFS PV for the OVA server") + return + } + + pvcName := fmt.Sprintf("%s-pvc-%s", ovaServerPrefix, provider.Name) + err = r.createPvcForNfs(provider, ctx, ownerReference, pvName, pvcName) + if err != nil { + r.Log.Error(err, "Failed to create NFS PVC for the OVA server") + return + } + + labels := map[string]string{"providerName": provider.Name, "app": "forklift"} + err = r.createServerDeployment(provider, ctx, ownerReference, pvcName, labels) + if err != nil { + r.Log.Error(err, "Failed to create OVA server deployment") + return + } + + err = r.createServerService(provider, ctx, ownerReference, labels) + if err != nil { + r.Log.Error(err, "Failed to create OVA server service") + return + } +} +func (r *Reconciler) createPvForNfs(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, pvName string) (err error) { + splitted := strings.Split(provider.Spec.URL, ":") if len(splitted) != 2 { - r.Log.Error(nil, "NFS server path doesn't contains :") + r.Log.Error(nil, "NFS server path doesn't contains: ", "provider", provider, "url", provider.Spec.URL) + return fmt.Errorf("wrong NFS server path") } nfsServer := splitted[0] nfsPath := splitted[1] - pv := &v1.PersistentVolume{ + pv := &core.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: pvName, OwnerReferences: []metav1.OwnerReference{ownerReference}, }, - Spec: v1.PersistentVolumeSpec{ - Capacity: v1.ResourceList{ - v1.ResourceStorage: resource.MustParse("1Gi"), + Spec: core.PersistentVolumeSpec{ + Capacity: core.ResourceList{ + core.ResourceStorage: resource.MustParse(pvSize), }, - AccessModes: 
[]v1.PersistentVolumeAccessMode{ - v1.ReadOnlyMany, + AccessModes: []core.PersistentVolumeAccessMode{ + core.ReadOnlyMany, }, - PersistentVolumeSource: v1.PersistentVolumeSource{ - NFS: &v1.NFSVolumeSource{ + PersistentVolumeSource: core.PersistentVolumeSource{ + NFS: &core.NFSVolumeSource{ Path: nfsPath, Server: nfsServer, }, }, }, } - err := r.Create(ctx, pv) + err = r.Create(ctx, pv) if err != nil { - r.Log.Error(err, "Failed to create OVA server PV") return } + return +} - pvcName := fmt.Sprintf("%s-pvc-%s", ovaServerPrefix, provider.Name) +func (r *Reconciler) createPvcForNfs(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, pvName, pvcName string) (err error) { sc := "" - pvc := &v1.PersistentVolumeClaim{ + pvc := &core.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: pvcName, Namespace: provider.Namespace, OwnerReferences: []metav1.OwnerReference{ownerReference}, }, - Spec: v1.PersistentVolumeClaimSpec{ - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceStorage: resource.MustParse("1Gi"), + Spec: core.PersistentVolumeClaimSpec{ + Resources: core.ResourceRequirements{ + Requests: core.ResourceList{ + core.ResourceStorage: resource.MustParse(pvSize), }, }, - AccessModes: []v1.PersistentVolumeAccessMode{ - v1.ReadOnlyMany, + AccessModes: []core.PersistentVolumeAccessMode{ + core.ReadOnlyMany, }, VolumeName: pvName, StorageClassName: &sc, @@ -89,16 +116,16 @@ func (r *Reconciler) CreateOVAServerDeployment(provider *api.Provider, ctx conte } err = r.Create(ctx, pvc) if err != nil { - r.Log.Error(err, "Failed to create OVA server PVC") return } + return +} +func (r *Reconciler) createServerDeployment(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, pvcName string, labels map[string]string) (err error) { deploymentName := fmt.Sprintf("%s-deployment-%s", ovaServerPrefix, provider.Name) annotations := make(map[string]string) - labels := map[string]string{"providerName": provider.Name, "app": "forklift"} var replicas int32 = 1 - //OVA server deployment deployment := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: deploymentName, @@ -114,88 +141,80 @@ func (r *Reconciler) CreateOVAServerDeployment(provider *api.Provider, ctx conte "app": "forklift", }, }, - Template: v1.PodTemplateSpec{ + Template: core.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "providerName": provider.Name, - "app": "forklift", - }, + Labels: labels, }, - Spec: r.makeOvaProviderPodSpec(pvcName, string(provider.Name)), + Spec: r.makeOvaProviderPodSpec(pvcName, provider.Name), }, }, } err = r.Create(ctx, deployment) if err != nil { - r.Log.Error(err, "Failed to create OVA server deployment") return } + return +} - // OVA Server Service +func (r *Reconciler) createServerService(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, labels map[string]string) (err error) { serviceName := fmt.Sprintf("ova-service-%s", provider.Name) - service := &v1.Service{ + service := &core.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Namespace: provider.Namespace, Labels: labels, OwnerReferences: []metav1.OwnerReference{ownerReference}, }, - Spec: v1.ServiceSpec{ - Selector: map[string]string{ - "providerName": provider.Name, - "app": "forklift", - }, - Ports: []v1.ServicePort{ + Spec: core.ServiceSpec{ + Selector: labels, + Ports: []core.ServicePort{ { Name: "api-http", - Protocol: v1.ProtocolTCP, + Protocol: core.ProtocolTCP, Port: 8080, TargetPort: 
intstr.FromInt(8080), }, }, - Type: v1.ServiceTypeClusterIP, + Type: core.ServiceTypeClusterIP, }, } err = r.Create(ctx, service) if err != nil { - r.Log.Error(err, "Failed to create OVA server service") return } + return } -func (r *Reconciler) makeOvaProviderPodSpec(pvcName string, providerName string) v1.PodSpec { - +func (r *Reconciler) makeOvaProviderPodSpec(pvcName string, providerName string) core.PodSpec { imageName, ok := os.LookupEnv(ovaImageVar) if !ok { r.Log.Error(nil, "Failed to find OVA server image") } nfsVolumeName := fmt.Sprintf("%s-%s", nfsVolumeNamePrefix, providerName) - ovaContainerName := fmt.Sprintf("%s-pod-%s", ovaServerPrefix, providerName) - return v1.PodSpec{ - - Containers: []v1.Container{ + return core.PodSpec{ + Containers: []core.Container{ { Name: ovaContainerName, - Ports: []v1.ContainerPort{{ContainerPort: 8080, Protocol: v1.ProtocolTCP}}, + Ports: []core.ContainerPort{{ContainerPort: 8080, Protocol: core.ProtocolTCP}}, Image: imageName, - VolumeMounts: []v1.VolumeMount{ + VolumeMounts: []core.VolumeMount{ { Name: nfsVolumeName, - MountPath: "/ova", + MountPath: mountPath, }, }, }, }, - Volumes: []v1.Volume{ + Volumes: []core.Volume{ { Name: nfsVolumeName, - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + VolumeSource: core.VolumeSource{ + PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ ClaimName: pvcName, }, }, From ef76a875c3dd7edd60fe3c0d1fcdbe13731df1dc Mon Sep 17 00:00:00 2001 From: Bella Khizgiyaev Date: Thu, 28 Sep 2023 18:38:58 +0300 Subject: [PATCH 3/5] Delete PVC and PV from destination namespace when the plan is archived Signed-off-by: Bella Khizgiyaev --- pkg/controller/plan/BUILD.bazel | 1 + pkg/controller/plan/kubevirt.go | 101 ++++++++++++--------------- pkg/controller/plan/migration.go | 44 ++++++++++++ pkg/controller/provider/ova-setup.go | 20 +++--- 4 files changed, 100 insertions(+), 66 deletions(-) diff --git a/pkg/controller/plan/BUILD.bazel b/pkg/controller/plan/BUILD.bazel index 0a233c18d..b0c7511ee 100644 --- a/pkg/controller/plan/BUILD.bazel +++ b/pkg/controller/plan/BUILD.bazel @@ -50,6 +50,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime", "//vendor/k8s.io/apimachinery/pkg/types", "//vendor/k8s.io/apimachinery/pkg/util/validation", + "//vendor/k8s.io/apimachinery/pkg/util/wait", "//vendor/k8s.io/apiserver/pkg/storage/names", "//vendor/k8s.io/client-go/kubernetes/scheme", "//vendor/kubevirt.io/api/core/v1:core", diff --git a/pkg/controller/plan/kubevirt.go b/pkg/controller/plan/kubevirt.go index b0dc61945..44d3e0802 100644 --- a/pkg/controller/plan/kubevirt.go +++ b/pkg/controller/plan/kubevirt.go @@ -21,6 +21,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" k8svalidation "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/wait" cnv "kubevirt.io/api/core/v1" libvirtxml "libvirt.org/libvirt-go-xml" @@ -1339,22 +1340,11 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi switch r.Source.Provider.Type() { case api.Ova: - pvcName := fmt.Sprintf("ova-nfs-pvc-%s-%s", r.Source.Provider.Name, r.Plan.Name) - /* - TODO: set ownerReferences the PV and PVC deleted once the plan is done. - cross namespaces are not possible to be used for plan/provider ownership. - optional: PV+PVC per VM, deleted via cleanup. ownership: importer pod? DV? 
- ownerReference := meta.OwnerReference{ - APIVersion: "forklift.konveyor.io/v1beta1", - Kind: "Plan", - Name: r.Plan.Name, - UID: r.Plan.UID, - } - */ err = r.CreatePvForNfs() if err != nil { return } + pvcName := getEntityName("pvc", r.Source.Provider.Name, r.Plan.Name) err = r.CreatePvcForNfs(pvcName) if err != nil { return @@ -1362,7 +1352,7 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi //path from disk volumes = append(volumes, core.Volume{ - Name: "nfs-pv", + Name: "store-pv", VolumeSource: core.VolumeSource{ PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ ClaimName: pvcName, @@ -1379,7 +1369,7 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi MountPath: "/opt", }, core.VolumeMount{ - Name: "nfs-pv", + Name: "store-pv", MountPath: "/ova", }, ) @@ -1848,72 +1838,69 @@ func (r *KubeVirt) EnsurePersistentVolume(vmRef ref.Ref, persistentVolumes []cor return } -func (r *KubeVirt) getOvaPvNfs() (found bool, err error) { - pv := &core.PersistentVolume{} - err = r.Destination.Client.Get( +func GetOvaPvNfs(client client.Client, planName string, providerName string) (pv *core.PersistentVolume, found bool, err error) { + pv = &core.PersistentVolume{} + err = client.Get( context.TODO(), types.NamespacedName{ - Name: fmt.Sprintf("ova-nfs-pv-%s", r.Plan.Name), + Name: getEntityName("pv", providerName, planName), }, pv, ) if err != nil { if k8serr.IsNotFound(err) { - return false, nil + return nil, false, nil } err = liberr.Wrap(err) return } - return true, nil + return } -func (r *KubeVirt) getOvaPvcNfs() (found bool, err error) { - pvc := &core.PersistentVolumeClaim{} - err = r.Destination.Client.Get( +func GetOvaPvcNfs(client client.Client, planName string, planNamespace string, providerName string) (pvc *core.PersistentVolumeClaim, found bool, err error) { + pvc = &core.PersistentVolumeClaim{} + err = client.Get( context.TODO(), types.NamespacedName{ - Name: fmt.Sprintf("ova-nfs-pvc-%s-%s", r.Source.Provider.Name, r.Plan.Name), - Namespace: r.Plan.Spec.TargetNamespace, + Name: getEntityName("pvc", providerName, planName), + Namespace: planNamespace, }, pvc, ) if err != nil { if k8serr.IsNotFound(err) { - return false, nil + return nil, false, nil } err = liberr.Wrap(err) return } - return true, nil + return } -// TODO if we use ownership get it to the method func (r *KubeVirt) CreatePvForNfs() (err error) { sourceProvider := r.Source.Provider splitted := strings.Split(sourceProvider.Spec.URL, ":") - if len(splitted) != 2 { - r.Log.Error(nil, "NFS server path doesn't contains: ", "url", sourceProvider.Spec.URL) - return fmt.Errorf("bad source provider %s URL %s", sourceProvider.Name, sourceProvider.Spec.URL) - } nfsServer := splitted[0] nfsPath := splitted[1] - pvName := fmt.Sprintf("ova-nfs-pv-%s", r.Plan.Name) - found, err := r.getOvaPvNfs() + _, found, err := GetOvaPvNfs(r.Destination.Client, r.Plan.Name, r.Plan.Provider.Source.Name) if err != nil { + r.Log.Error(err, "Failed to get ova PV") return } + pvName := getEntityName("pv", r.Source.Provider.Name, r.Plan.Name) if found { r.Log.Info("The PV for OVA NFS exists", "PV", pvName) return } + labels := map[string]string{"provider": r.Plan.Provider.Source.Name, "app": "forklift", "migration": r.Migration.Name, "plan": r.Plan.Name} pv := &core.PersistentVolume{ ObjectMeta: meta.ObjectMeta{ - Name: pvName, - // OwnerReferences: []meta.OwnerReference{ownerReference}, + Name: pvName, + Labels: labels, }, Spec: core.PersistentVolumeSpec{ Capacity: 
core.ResourceList{ @@ -1938,10 +1925,10 @@ func (r *KubeVirt) CreatePvForNfs() (err error) { return } -// TODO if we use ownership get it to the method func (r *KubeVirt) CreatePvcForNfs(pvcName string) (err error) { - found, err := r.getOvaPvcNfs() + _, found, err := GetOvaPvcNfs(r.Destination.Client, r.Plan.Name, r.Plan.Spec.TargetNamespace, r.Plan.Provider.Source.Name) if err != nil { + r.Log.Error(err, "Failed to get ova PVC") return } if found { @@ -1950,12 +1937,13 @@ func (r *KubeVirt) CreatePvcForNfs(pvcName string) (err error) { } sc := "" - pvName := fmt.Sprintf("ova-nfs-pv-%s", r.Plan.Name) + pvName := getEntityName("pv", r.Source.Provider.Name, r.Plan.Name) + labels := map[string]string{"provider": r.Plan.Provider.Source.Name, "app": "forklift", "migration": r.Migration.Name, "plan": r.Plan.Name} pvc := &core.PersistentVolumeClaim{ ObjectMeta: meta.ObjectMeta{ Name: pvcName, Namespace: r.Plan.Spec.TargetNamespace, - // OwnerReferences: []meta.OwnerReference{ownerReference}, + Labels: labels, }, Spec: core.PersistentVolumeClaimSpec{ Resources: core.ResourceRequirements{ @@ -1976,30 +1964,29 @@ func (r *KubeVirt) CreatePvcForNfs(pvcName string) (err error) { return } - // wait until pvc and pv are bounded. - timeout := time.After(5 * time.Minute) - tick := time.Tick(5 * time.Second) pvcNamespacedName := types.NamespacedName{ Namespace: r.Plan.Spec.TargetNamespace, Name: pvcName, } - for { - select { - case <-timeout: - r.Log.Error(err, "Timed out waiting for PVC to be bound") - return fmt.Errorf("timeout passed waiting for the OVA PVC %v", pvc) - case <-tick: - err = r.Get(context.TODO(), pvcNamespacedName, pvc) - if err != nil { - r.Log.Error(err, "Failed to bound OVA plan PVC") - return - } - if pvc.Status.Phase == "Bound" { - return - } + if err = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 45*time.Second, true, func(ctx context.Context) (done bool, err error) { + err = r.Get(context.TODO(), pvcNamespacedName, pvc) + if err != nil { + r.Log.Error(err, "Failed to get OVA plan PVC") + return false, err } + return pvc.Status.Phase == "Bound", nil + + }); err != nil { + r.Log.Error(err, "Failed to bind OVA PVC to PV ") + return + } + return nil +} + +func getEntityName(resourceType, providerName, planName string) string { + return fmt.Sprintf("ova-store-%s-%s-%s", resourceType, providerName, planName) } // Ensure the PV exist on the destination. 
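
[Editor's note, not part of the patch] The refactor above replaces the hand-rolled timeout/tick loop in CreatePvcForNfs with wait.PollUntilContextTimeout. A minimal, self-contained sketch of that polling pattern follows for reference; the function name waitForPVCBound, the package name, and the 5s/45s interval and timeout are illustrative assumptions, not code from this series.

package example

import (
	"context"
	"time"

	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitForPVCBound polls until the named PVC reports phase Bound, mirroring the
// PollUntilContextTimeout pattern adopted by CreatePvcForNfs in the hunk above.
func waitForPVCBound(ctx context.Context, c client.Client, namespace, name string) error {
	pvc := &core.PersistentVolumeClaim{}
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, 45*time.Second, true,
		func(ctx context.Context) (bool, error) {
			if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, pvc); err != nil {
				// Stop polling on lookup errors, matching the behavior in the patch.
				return false, err
			}
			return pvc.Status.Phase == core.ClaimBound, nil
		})
}
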
diff --git a/pkg/controller/plan/migration.go b/pkg/controller/plan/migration.go index 66b771a53..b841a77a4 100644 --- a/pkg/controller/plan/migration.go +++ b/pkg/controller/plan/migration.go @@ -12,6 +12,7 @@ import ( "strings" "time" + "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/plan" "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter" plancontext "github.com/konveyor/forklift-controller/pkg/controller/plan/context" @@ -336,6 +337,14 @@ func (r *Migration) Archive() { return } + if r.Plan.Provider.Source.Type() == v1beta1.Ova { + err = r.deletePvcPvForOva() + if err != nil { + r.Log.Error(err, "Failed to cleanup the PVC and PV for OVA plan") + return + } + } + for _, vm := range r.Plan.Status.Migration.VMs { err = r.cleanup(vm) if err != nil { @@ -486,6 +495,41 @@ func (r *Migration) deleteImporterPods(vm *plan.VMStatus) (err error) { return } +func (r *Migration) deletePvcPvForOva() (err error) { + pvc, _, err := GetOvaPvcNfs(r.Destination.Client, r.Plan.Name, r.Plan.Spec.TargetNamespace, r.Plan.Provider.Source.Name) + if err != nil { + r.Log.Error(err, "Failed to get the plan PVC") + return + } + // The PVC was already deleted + if pvc == nil { + return + } + + err = r.Destination.Client.Delete(context.TODO(), pvc) + if err != nil { + r.Log.Error(err, "Failed to delete the plan PVC") + return + } + + pv, _, err := GetOvaPvNfs(r.Destination.Client, r.Plan.Name, r.Plan.Provider.Source.Name) + if err != nil { + r.Log.Error(err, "Failed to get the plan PV") + return + } + // The PV was already deleted + if pv == nil { + return + } + + err = r.Destination.Client.Delete(context.TODO(), pv) + if err != nil { + r.Log.Error(err, "Failed to delete the plan PV") + return + } + return +} + // Best effort attempt to resolve canceled refs. 
func (r *Migration) resolveCanceledRefs() { for i := range r.Context.Migration.Spec.Cancel { diff --git a/pkg/controller/provider/ova-setup.go b/pkg/controller/provider/ova-setup.go index 6933cd823..6a3e1507a 100644 --- a/pkg/controller/provider/ova-setup.go +++ b/pkg/controller/provider/ova-setup.go @@ -29,21 +29,21 @@ func (r Reconciler) CreateOVAServerDeployment(provider *api.Provider, ctx contex Name: provider.Name, UID: provider.UID, } - pvName := fmt.Sprintf("%s-pv-%s", ovaServerPrefix, provider.Name) + pvName := fmt.Sprintf("%s-pv-%s-%s", ovaServerPrefix, provider.Name, provider.Namespace) err := r.createPvForNfs(provider, ctx, ownerReference, pvName) if err != nil { - r.Log.Error(err, "Failed to create NFS PV for the OVA server") + r.Log.Error(err, "Failed to create PV for the OVA server") return } pvcName := fmt.Sprintf("%s-pvc-%s", ovaServerPrefix, provider.Name) err = r.createPvcForNfs(provider, ctx, ownerReference, pvName, pvcName) if err != nil { - r.Log.Error(err, "Failed to create NFS PVC for the OVA server") + r.Log.Error(err, "Failed to create PVC for the OVA server") return } - labels := map[string]string{"providerName": provider.Name, "app": "forklift"} + labels := map[string]string{"provider": provider.Name, "app": "forklift", "subapp": "ova-server"} err = r.createServerDeployment(provider, ctx, ownerReference, pvcName, labels) if err != nil { r.Log.Error(err, "Failed to create OVA server deployment") @@ -59,17 +59,15 @@ func (r Reconciler) CreateOVAServerDeployment(provider *api.Provider, ctx contex func (r *Reconciler) createPvForNfs(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, pvName string) (err error) { splitted := strings.Split(provider.Spec.URL, ":") - if len(splitted) != 2 { - r.Log.Error(nil, "NFS server path doesn't contains: ", "provider", provider, "url", provider.Spec.URL) - return fmt.Errorf("wrong NFS server path") - } nfsServer := splitted[0] nfsPath := splitted[1] + labels := map[string]string{"provider": provider.Name, "app": "forklift", "subapp": "ova-server"} pv := &core.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: pvName, OwnerReferences: []metav1.OwnerReference{ownerReference}, + Labels: labels, }, Spec: core.PersistentVolumeSpec{ Capacity: core.ResourceList{ @@ -95,11 +93,13 @@ func (r *Reconciler) createPvForNfs(provider *api.Provider, ctx context.Context, func (r *Reconciler) createPvcForNfs(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, pvName, pvcName string) (err error) { sc := "" + labels := map[string]string{"providerName": provider.Name, "app": "forklift", "subapp": "ova-server"} pvc := &core.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: pvcName, Namespace: provider.Namespace, OwnerReferences: []metav1.OwnerReference{ownerReference}, + Labels: labels, }, Spec: core.PersistentVolumeClaimSpec{ Resources: core.ResourceRequirements{ @@ -138,7 +138,9 @@ func (r *Reconciler) createServerDeployment(provider *api.Provider, ctx context. 
Replicas: &replicas, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ - "app": "forklift", + "app": "forklift", + "provider": provider.Name, + "subapp": "ova-server", }, }, Template: core.PodTemplateSpec{ From 76919d9a2265d569445f873c4a77a6e7ecbd3a9c Mon Sep 17 00:00:00 2001 From: Bella Khizgiyaev Date: Wed, 4 Oct 2023 12:10:12 +0300 Subject: [PATCH 4/5] minor changes for ova-provider creation Signed-off-by: Bella Khizgiyaev --- pkg/controller/provider/ova-setup.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pkg/controller/provider/ova-setup.go b/pkg/controller/provider/ova-setup.go index 6a3e1507a..7731b551f 100644 --- a/pkg/controller/provider/ova-setup.go +++ b/pkg/controller/provider/ova-setup.go @@ -93,7 +93,7 @@ func (r *Reconciler) createPvForNfs(provider *api.Provider, ctx context.Context, func (r *Reconciler) createPvcForNfs(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, pvName, pvcName string) (err error) { sc := "" - labels := map[string]string{"providerName": provider.Name, "app": "forklift", "subapp": "ova-server"} + labels := map[string]string{"provider": provider.Name, "app": "forklift", "subapp": "ova-server"} pvc := &core.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: pvcName, @@ -137,11 +137,7 @@ func (r *Reconciler) createServerDeployment(provider *api.Provider, ctx context. Spec: appsv1.DeploymentSpec{ Replicas: &replicas, Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": "forklift", - "provider": provider.Name, - "subapp": "ova-server", - }, + MatchLabels: labels, }, Template: core.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ From 5d1875c05df633d8f0f588314f77f1d24d9ac606 Mon Sep 17 00:00:00 2001 From: Arik Hadas Date: Wed, 4 Oct 2023 13:57:35 +0300 Subject: [PATCH 5/5] minor cleanup to recent OVA related changes Signed-off-by: Arik Hadas --- pkg/controller/plan/migration.go | 2 +- pkg/controller/provider/controller.go | 2 +- pkg/controller/provider/ova-setup.go | 16 ++++++++-------- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/controller/plan/migration.go b/pkg/controller/plan/migration.go index b841a77a4..9d3cde4cb 100644 --- a/pkg/controller/plan/migration.go +++ b/pkg/controller/plan/migration.go @@ -340,7 +340,7 @@ func (r *Migration) Archive() { if r.Plan.Provider.Source.Type() == v1beta1.Ova { err = r.deletePvcPvForOva() if err != nil { - r.Log.Error(err, "Failed to cleanup the PVC and PV for OVA plan") + r.Log.Error(err, "Failed to clean up the PVC and PV for the OVA plan") return } } diff --git a/pkg/controller/provider/controller.go b/pkg/controller/provider/controller.go index 073c01e7a..3178e11f3 100644 --- a/pkg/controller/provider/controller.go +++ b/pkg/controller/provider/controller.go @@ -187,7 +187,7 @@ func (r Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (r if provider.Type() == api.Ova { - deploymentName := fmt.Sprintf("%s-deployment-%s", ovaServerPrefix, provider.Name) + deploymentName := fmt.Sprintf("%s-deployment-%s", ovaServer, provider.Name) deployment := &appsv1.Deployment{} err = r.Get(context.TODO(), client.ObjectKey{ diff --git a/pkg/controller/provider/ova-setup.go b/pkg/controller/provider/ova-setup.go index 7731b551f..776b8cdf8 100644 --- a/pkg/controller/provider/ova-setup.go +++ b/pkg/controller/provider/ova-setup.go @@ -15,7 +15,7 @@ import ( ) const ( - ovaServerPrefix = "ova-server" + ovaServer = "ova-server" ovaImageVar = "OVA_PROVIDER_SERVER_IMAGE" 
nfsVolumeNamePrefix = "nfs-volume" mountPath = "/ova" @@ -29,21 +29,21 @@ func (r Reconciler) CreateOVAServerDeployment(provider *api.Provider, ctx contex Name: provider.Name, UID: provider.UID, } - pvName := fmt.Sprintf("%s-pv-%s-%s", ovaServerPrefix, provider.Name, provider.Namespace) + pvName := fmt.Sprintf("%s-pv-%s-%s", ovaServer, provider.Name, provider.Namespace) err := r.createPvForNfs(provider, ctx, ownerReference, pvName) if err != nil { r.Log.Error(err, "Failed to create PV for the OVA server") return } - pvcName := fmt.Sprintf("%s-pvc-%s", ovaServerPrefix, provider.Name) + pvcName := fmt.Sprintf("%s-pvc-%s", ovaServer, provider.Name) err = r.createPvcForNfs(provider, ctx, ownerReference, pvName, pvcName) if err != nil { r.Log.Error(err, "Failed to create PVC for the OVA server") return } - labels := map[string]string{"provider": provider.Name, "app": "forklift", "subapp": "ova-server"} + labels := map[string]string{"provider": provider.Name, "app": "forklift", "subapp": ovaServer} err = r.createServerDeployment(provider, ctx, ownerReference, pvcName, labels) if err != nil { r.Log.Error(err, "Failed to create OVA server deployment") @@ -61,7 +61,7 @@ func (r *Reconciler) createPvForNfs(provider *api.Provider, ctx context.Context, splitted := strings.Split(provider.Spec.URL, ":") nfsServer := splitted[0] nfsPath := splitted[1] - labels := map[string]string{"provider": provider.Name, "app": "forklift", "subapp": "ova-server"} + labels := map[string]string{"provider": provider.Name, "app": "forklift", "subapp": ovaServer} pv := &core.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ @@ -93,7 +93,7 @@ func (r *Reconciler) createPvForNfs(provider *api.Provider, ctx context.Context, func (r *Reconciler) createPvcForNfs(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, pvName, pvcName string) (err error) { sc := "" - labels := map[string]string{"provider": provider.Name, "app": "forklift", "subapp": "ova-server"} + labels := map[string]string{"provider": provider.Name, "app": "forklift", "subapp": ovaServer} pvc := &core.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: pvcName, @@ -122,7 +122,7 @@ func (r *Reconciler) createPvcForNfs(provider *api.Provider, ctx context.Context } func (r *Reconciler) createServerDeployment(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, pvcName string, labels map[string]string) (err error) { - deploymentName := fmt.Sprintf("%s-deployment-%s", ovaServerPrefix, provider.Name) + deploymentName := fmt.Sprintf("%s-deployment-%s", ovaServer, provider.Name) annotations := make(map[string]string) var replicas int32 = 1 @@ -192,7 +192,7 @@ func (r *Reconciler) makeOvaProviderPodSpec(pvcName string, providerName string) } nfsVolumeName := fmt.Sprintf("%s-%s", nfsVolumeNamePrefix, providerName) - ovaContainerName := fmt.Sprintf("%s-pod-%s", ovaServerPrefix, providerName) + ovaContainerName := fmt.Sprintf("%s-pod-%s", ovaServer, providerName) return core.PodSpec{ Containers: []core.Container{
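
[Editor's note, not part of the patch] Because the PV and the target-namespace PVC cannot carry a cross-namespace ownerReference to the Plan, the series labels them (patch 3) and deletes them explicitly on archive via deletePvcPvForOva. A hedged alternative sketch using label selectors with controller-runtime's DeleteAllOf is shown below; the assumed labels ("app": "forklift", "plan": <plan name>) follow patch 3, and the helper name deleteOvaStorageByLabel is hypothetical — this is an illustration of a possible cleanup strategy, not what the patches implement.

package example

import (
	"context"

	core "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteOvaStorageByLabel removes the plan-scoped NFS PVC and PV by label
// selector instead of looking each one up by name before deleting it.
func deleteOvaStorageByLabel(ctx context.Context, c client.Client, planName, targetNamespace string) error {
	selector := client.MatchingLabels{"app": "forklift", "plan": planName}

	// PVCs are namespaced: restrict the delete to the plan's target namespace.
	if err := c.DeleteAllOf(ctx, &core.PersistentVolumeClaim{}, client.InNamespace(targetNamespace), selector); err != nil {
		return err
	}

	// PVs are cluster-scoped, so no namespace option is needed.
	return c.DeleteAllOf(ctx, &core.PersistentVolume{}, selector)
}
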