Delete PVC and PV from destination namespace when the plan is archived
Signed-off-by: Bella Khizgiyaev <[email protected]>
bkhizgiy committed Oct 4, 2023
1 parent 520baed commit ef76a87
Showing 4 changed files with 100 additions and 66 deletions.
1 change: 1 addition & 0 deletions pkg/controller/plan/BUILD.bazel
@@ -50,6 +50,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/runtime",
         "//vendor/k8s.io/apimachinery/pkg/types",
         "//vendor/k8s.io/apimachinery/pkg/util/validation",
+        "//vendor/k8s.io/apimachinery/pkg/util/wait",
         "//vendor/k8s.io/apiserver/pkg/storage/names",
         "//vendor/k8s.io/client-go/kubernetes/scheme",
         "//vendor/kubevirt.io/api/core/v1:core",
101 changes: 44 additions & 57 deletions pkg/controller/plan/kubevirt.go
Expand Up @@ -21,6 +21,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
k8svalidation "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/wait"
cnv "kubevirt.io/api/core/v1"
libvirtxml "libvirt.org/libvirt-go-xml"

@@ -1339,30 +1340,19 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi
 
     switch r.Source.Provider.Type() {
     case api.Ova:
-        pvcName := fmt.Sprintf("ova-nfs-pvc-%s-%s", r.Source.Provider.Name, r.Plan.Name)
-        /*
-            TODO: set ownerReferences the PV and PVC deleted once the plan is done.
-            cross namespaces are not possible to be used for plan/provider ownership.
-            optional: PV+PVC per VM, deleted via cleanup. ownership: importer pod? DV?
-            ownerReference := meta.OwnerReference{
-                APIVersion: "forklift.konveyor.io/v1beta1",
-                Kind:       "Plan",
-                Name:       r.Plan.Name,
-                UID:        r.Plan.UID,
-            }
-        */
         err = r.CreatePvForNfs()
         if err != nil {
             return
         }
+        pvcName := getEntityName("pvc", r.Source.Provider.Name, r.Plan.Name)
         err = r.CreatePvcForNfs(pvcName)
         if err != nil {
             return
         }
 
         //path from disk
         volumes = append(volumes, core.Volume{
-            Name: "nfs-pv",
+            Name: "store-pv",
             VolumeSource: core.VolumeSource{
                 PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
                     ClaimName: pvcName,
@@ -1379,7 +1369,7 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi
                 MountPath: "/opt",
             },
             core.VolumeMount{
-                Name:      "nfs-pv",
+                Name:      "store-pv",
                 MountPath: "/ova",
             },
         )
@@ -1848,72 +1838,69 @@ func (r *KubeVirt) EnsurePersistentVolume(vmRef ref.Ref, persistentVolumes []cor
     return
 }
 
-func (r *KubeVirt) getOvaPvNfs() (found bool, err error) {
-    pv := &core.PersistentVolume{}
-    err = r.Destination.Client.Get(
+func GetOvaPvNfs(client client.Client, planName string, providerName string) (pv *core.PersistentVolume, found bool, err error) {
+    pv = &core.PersistentVolume{}
+    err = client.Get(
         context.TODO(),
         types.NamespacedName{
-            Name: fmt.Sprintf("ova-nfs-pv-%s", r.Plan.Name),
+            Name: getEntityName("pv", providerName, planName),
         },
         pv,
     )
 
     if err != nil {
         if k8serr.IsNotFound(err) {
-            return false, nil
+            return nil, false, nil
         }
         err = liberr.Wrap(err)
         return
     }
-    return true, nil
+    return
 }
 
-func (r *KubeVirt) getOvaPvcNfs() (found bool, err error) {
-    pvc := &core.PersistentVolumeClaim{}
-    err = r.Destination.Client.Get(
+func GetOvaPvcNfs(client client.Client, planName string, planNamespace string, providerName string) (pvc *core.PersistentVolumeClaim, found bool, err error) {
+    pvc = &core.PersistentVolumeClaim{}
+    err = client.Get(
         context.TODO(),
         types.NamespacedName{
-            Name:      fmt.Sprintf("ova-nfs-pvc-%s-%s", r.Source.Provider.Name, r.Plan.Name),
-            Namespace: r.Plan.Spec.TargetNamespace,
+            Name:      getEntityName("pvc", providerName, planName),
+            Namespace: planNamespace,
         },
         pvc,
     )
 
     if err != nil {
         if k8serr.IsNotFound(err) {
-            return false, nil
+            return nil, false, nil
         }
         err = liberr.Wrap(err)
         return
     }
-    return true, nil
+    return
 }
 
-// TODO if we use ownership get it to the method
 func (r *KubeVirt) CreatePvForNfs() (err error) {
     sourceProvider := r.Source.Provider
     splitted := strings.Split(sourceProvider.Spec.URL, ":")
-    if len(splitted) != 2 {
-        r.Log.Error(nil, "NFS server path doesn't contains: ", "url", sourceProvider.Spec.URL)
-        return fmt.Errorf("bad source provider %s URL %s", sourceProvider.Name, sourceProvider.Spec.URL)
-    }
     nfsServer := splitted[0]
     nfsPath := splitted[1]
 
-    pvName := fmt.Sprintf("ova-nfs-pv-%s", r.Plan.Name)
-    found, err := r.getOvaPvNfs()
+    _, found, err := GetOvaPvNfs(r.Destination.Client, r.Plan.Name, r.Plan.Provider.Source.Name)
     if err != nil {
+        r.Log.Error(err, "Failed to get ova PV")
         return
     }
+    pvName := getEntityName("pv", r.Source.Provider.Name, r.Plan.Name)
     if found {
         r.Log.Info("The PV for OVA NFS exists", "PV", pvName)
         return
     }
 
+    labels := map[string]string{"provider": r.Plan.Provider.Source.Name, "app": "forklift", "migration": r.Migration.Name, "plan": r.Plan.Name}
     pv := &core.PersistentVolume{
         ObjectMeta: meta.ObjectMeta{
-            Name: pvName,
-            // OwnerReferences: []meta.OwnerReference{ownerReference},
+            Name:   pvName,
+            Labels: labels,
         },
         Spec: core.PersistentVolumeSpec{
             Capacity: core.ResourceList{
@@ -1938,10 +1925,10 @@ func (r *KubeVirt) CreatePvForNfs() (err error) {
     return
 }
 
-// TODO if we use ownership get it to the method
 func (r *KubeVirt) CreatePvcForNfs(pvcName string) (err error) {
-    found, err := r.getOvaPvcNfs()
+    _, found, err := GetOvaPvcNfs(r.Destination.Client, r.Plan.Name, r.Plan.Spec.TargetNamespace, r.Plan.Provider.Source.Name)
     if err != nil {
+        r.Log.Error(err, "Failed to get ova PVC")
         return
     }
     if found {
@@ -1950,12 +1937,13 @@ func (r *KubeVirt) CreatePvcForNfs(pvcName string) (err error) {
     }
 
     sc := ""
-    pvName := fmt.Sprintf("ova-nfs-pv-%s", r.Plan.Name)
+    pvName := getEntityName("pv", r.Source.Provider.Name, r.Plan.Name)
+    labels := map[string]string{"provider": r.Plan.Provider.Source.Name, "app": "forklift", "migration": r.Migration.Name, "plan": r.Plan.Name}
     pvc := &core.PersistentVolumeClaim{
         ObjectMeta: meta.ObjectMeta{
             Name:      pvcName,
             Namespace: r.Plan.Spec.TargetNamespace,
-            // OwnerReferences: []meta.OwnerReference{ownerReference},
+            Labels:    labels,
         },
         Spec: core.PersistentVolumeClaimSpec{
             Resources: core.ResourceRequirements{
@@ -1976,30 +1964,29 @@ func (r *KubeVirt) CreatePvcForNfs(pvcName string) (err error) {
         return
     }
 
-    // wait until pvc and pv are bounded.
-    timeout := time.After(5 * time.Minute)
-    tick := time.Tick(5 * time.Second)
     pvcNamespacedName := types.NamespacedName{
         Namespace: r.Plan.Spec.TargetNamespace,
         Name:      pvcName,
     }
 
-    for {
-        select {
-        case <-timeout:
-            r.Log.Error(err, "Timed out waiting for PVC to be bound")
-            return fmt.Errorf("timeout passed waiting for the OVA PVC %v", pvc)
-        case <-tick:
-            err = r.Get(context.TODO(), pvcNamespacedName, pvc)
-            if err != nil {
-                r.Log.Error(err, "Failed to bound OVA plan PVC")
-                return
-            }
-            if pvc.Status.Phase == "Bound" {
-                return
-            }
+    if err = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 45*time.Second, true, func(ctx context.Context) (done bool, err error) {
+        err = r.Get(context.TODO(), pvcNamespacedName, pvc)
+        if err != nil {
+            r.Log.Error(err, "Failed to get OVA plan PVC")
+            return false, err
         }
+        return pvc.Status.Phase == "Bound", nil
+
+    }); err != nil {
+        r.Log.Error(err, "Failed to bind OVA PVC to PV ")
+        return
+
     }
+    return nil
 }
 
+func getEntityName(resourceType, providerName, planName string) string {
+    return fmt.Sprintf("ova-store-%s-%s-%s", resourceType, providerName, planName)
+}
+
 // Ensure the PV exist on the destination.
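A note on the kubevirt.go changes above: the hand-rolled time.After/time.Tick select loop in CreatePvcForNfs is replaced by wait.PollUntilContextTimeout, which checks the PVC every 5 seconds for up to 45 seconds and stops as soon as the claim reports phase Bound or the condition returns an error, and the shared resources are now named through getEntityName (ova-store-pv-<provider>-<plan> and ova-store-pvc-<provider>-<plan>), which is what the archive cleanup in migration.go looks up. A minimal standalone sketch of the same polling pattern, assuming a controller-runtime client; waitForPvcBound and its parameters are illustrative and not part of this commit:

    // Illustrative sketch only; mirrors the polling pattern used above.
    package example

    import (
        "context"
        "time"

        core "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/apimachinery/pkg/util/wait"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // waitForPvcBound polls every 5s, for up to 45s, until the named PVC reports
    // phase Bound; an error returned from the condition aborts the wait early.
    func waitForPvcBound(c client.Client, namespace, name string) error {
        pvc := &core.PersistentVolumeClaim{}
        key := types.NamespacedName{Namespace: namespace, Name: name}
        return wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 45*time.Second, true,
            func(ctx context.Context) (bool, error) {
                if err := c.Get(ctx, key, pvc); err != nil {
                    return false, err
                }
                return pvc.Status.Phase == core.ClaimBound, nil
            })
    }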
44 changes: 44 additions & 0 deletions pkg/controller/plan/migration.go
@@ -12,6 +12,7 @@ import (
     "strings"
     "time"
 
+    "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1"
     "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/plan"
     "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter"
     plancontext "github.com/konveyor/forklift-controller/pkg/controller/plan/context"
@@ -336,6 +337,14 @@ func (r *Migration) Archive() {
         return
     }
 
+    if r.Plan.Provider.Source.Type() == v1beta1.Ova {
+        err = r.deletePvcPvForOva()
+        if err != nil {
+            r.Log.Error(err, "Failed to cleanup the PVC and PV for OVA plan")
+            return
+        }
+    }
+
     for _, vm := range r.Plan.Status.Migration.VMs {
         err = r.cleanup(vm)
         if err != nil {
@@ -486,6 +495,41 @@ func (r *Migration) deleteImporterPods(vm *plan.VMStatus) (err error) {
     return
 }
 
+func (r *Migration) deletePvcPvForOva() (err error) {
+    pvc, _, err := GetOvaPvcNfs(r.Destination.Client, r.Plan.Name, r.Plan.Spec.TargetNamespace, r.Plan.Provider.Source.Name)
+    if err != nil {
+        r.Log.Error(err, "Failed to get the plan PVC")
+        return
+    }
+    // The PVC was already deleted
+    if pvc == nil {
+        return
+    }
+
+    err = r.Destination.Client.Delete(context.TODO(), pvc)
+    if err != nil {
+        r.Log.Error(err, "Failed to delete the plan PVC")
+        return
+    }
+
+    pv, _, err := GetOvaPvNfs(r.Destination.Client, r.Plan.Name, r.Plan.Provider.Source.Name)
+    if err != nil {
+        r.Log.Error(err, "Failed to get the plan PV")
+        return
+    }
+    // The PV was already deleted
+    if pv == nil {
+        return
+    }
+
+    err = r.Destination.Client.Delete(context.TODO(), pv)
+    if err != nil {
+        r.Log.Error(err, "Failed to delete the plan PV")
+        return
+    }
+    return
+}
+
 // Best effort attempt to resolve canceled refs.
 func (r *Migration) resolveCanceledRefs() {
     for i := range r.Context.Migration.Spec.Cancel {
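The migration.go hunk above wires the cleanup into Archive(): for OVA plans the shared PVC and PV are removed from the destination before the per-VM cleanup runs, and because GetOvaPvcNfs/GetOvaPvNfs return a nil object without an error when the resource is NotFound, re-archiving a plan whose storage is already gone simply returns early. A hedged sketch of the same delete-if-present idea as a generic helper, assuming a controller-runtime client; deleteIfPresent is illustrative and not part of this commit:

    // Illustrative sketch only; not part of the commit.
    package example

    import (
        "context"

        k8serr "k8s.io/apimachinery/pkg/api/errors"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // deleteIfPresent deletes obj and treats "already gone" as success, which keeps
    // an archive path idempotent even if it runs more than once or races another cleanup.
    func deleteIfPresent(ctx context.Context, c client.Client, obj client.Object) error {
        if err := c.Delete(ctx, obj); err != nil && !k8serr.IsNotFound(err) {
            return err
        }
        return nil
    }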
20 changes: 11 additions & 9 deletions pkg/controller/provider/ova-setup.go
@@ -29,21 +29,21 @@ func (r Reconciler) CreateOVAServerDeployment(provider *api.Provider, ctx contex
         Name:       provider.Name,
         UID:        provider.UID,
     }
-    pvName := fmt.Sprintf("%s-pv-%s", ovaServerPrefix, provider.Name)
+    pvName := fmt.Sprintf("%s-pv-%s-%s", ovaServerPrefix, provider.Name, provider.Namespace)
     err := r.createPvForNfs(provider, ctx, ownerReference, pvName)
     if err != nil {
-        r.Log.Error(err, "Failed to create NFS PV for the OVA server")
+        r.Log.Error(err, "Failed to create PV for the OVA server")
         return
     }
 
     pvcName := fmt.Sprintf("%s-pvc-%s", ovaServerPrefix, provider.Name)
     err = r.createPvcForNfs(provider, ctx, ownerReference, pvName, pvcName)
     if err != nil {
-        r.Log.Error(err, "Failed to create NFS PVC for the OVA server")
+        r.Log.Error(err, "Failed to create PVC for the OVA server")
         return
     }
 
-    labels := map[string]string{"providerName": provider.Name, "app": "forklift"}
+    labels := map[string]string{"provider": provider.Name, "app": "forklift", "subapp": "ova-server"}
     err = r.createServerDeployment(provider, ctx, ownerReference, pvcName, labels)
     if err != nil {
         r.Log.Error(err, "Failed to create OVA server deployment")
@@ -59,17 +59,15 @@ func (r Reconciler) CreateOVAServerDeployment(provider *api.Provider, ctx contex
 
 func (r *Reconciler) createPvForNfs(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, pvName string) (err error) {
     splitted := strings.Split(provider.Spec.URL, ":")
-    if len(splitted) != 2 {
-        r.Log.Error(nil, "NFS server path doesn't contains: ", "provider", provider, "url", provider.Spec.URL)
-        return fmt.Errorf("wrong NFS server path")
-    }
     nfsServer := splitted[0]
     nfsPath := splitted[1]
+    labels := map[string]string{"provider": provider.Name, "app": "forklift", "subapp": "ova-server"}
 
     pv := &core.PersistentVolume{
         ObjectMeta: metav1.ObjectMeta{
             Name:            pvName,
             OwnerReferences: []metav1.OwnerReference{ownerReference},
+            Labels:          labels,
         },
         Spec: core.PersistentVolumeSpec{
             Capacity: core.ResourceList{
@@ -95,11 +93,13 @@ func (r *Reconciler) createPvForNfs(provider *api.Provider, ctx context.Context,
 
 func (r *Reconciler) createPvcForNfs(provider *api.Provider, ctx context.Context, ownerReference metav1.OwnerReference, pvName, pvcName string) (err error) {
     sc := ""
+    labels := map[string]string{"providerName": provider.Name, "app": "forklift", "subapp": "ova-server"}
     pvc := &core.PersistentVolumeClaim{
         ObjectMeta: metav1.ObjectMeta{
             Name:            pvcName,
             Namespace:       provider.Namespace,
             OwnerReferences: []metav1.OwnerReference{ownerReference},
+            Labels:          labels,
         },
         Spec: core.PersistentVolumeClaimSpec{
             Resources: core.ResourceRequirements{
@@ -138,7 +138,9 @@ func (r *Reconciler) createServerDeployment(provider *api.Provider, ctx context.
             Replicas: &replicas,
             Selector: &metav1.LabelSelector{
                 MatchLabels: map[string]string{
-                    "app": "forklift",
+                    "app":      "forklift",
+                    "provider": provider.Name,
+                    "subapp":   "ova-server",
                 },
             },
             Template: core.PodTemplateSpec{
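In ova-setup.go, the server PV name now includes the provider namespace, the PV and PVC gain labels, and the Deployment selector matches on provider and subapp in addition to app: forklift, so one provider's OVA server pods are no longer selected by another provider's Deployment. When tightening MatchLabels like this, keep in mind that a Deployment's selector is immutable and must match the pod template labels; deriving both from the same map avoids drift. A generic sketch of that pattern, not taken from this repository:

    // Illustrative sketch only; names are hypothetical.
    package example

    import (
        apps "k8s.io/api/apps/v1"
        core "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func serverDeployment(name, namespace string, labels map[string]string) *apps.Deployment {
        return &apps.Deployment{
            ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, Labels: labels},
            Spec: apps.DeploymentSpec{
                // The selector is immutable and must match the template labels,
                // so both sides reference the same map.
                Selector: &metav1.LabelSelector{MatchLabels: labels},
                Template: core.PodTemplateSpec{
                    ObjectMeta: metav1.ObjectMeta{Labels: labels},
                    Spec:       core.PodSpec{ /* containers elided */ },
                },
            },
        }
    }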
