MTV-1536 | Use CDI for disk transfer in cold migration
Issues:
[1] Allow migration of "unknown" guests
Right now we cannot migrate guests whose operating system is unknown to,
or unsupported by, virt-v2v [3].

[2] Unifying the process and potential speedup
Right now we are using two different methods for the disk transfer. This
means extra engineering effort to maintain two paths and makes the flows
harder to debug.
Additionally, virt-v2v transfers the disks sequentially, whereas with CDI
we can start multiple disk imports in parallel. This can improve
migration speed.
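
For illustration only (this is not MTV controller code): a minimal Go
sketch of the parallel flow, where one DataVolume per disk is created up
front, CDI runs the importer pods concurrently, and the caller just
waits for every import to reach Succeeded. The polling loop is a
simplification of what a reconcile-based controller would do.

package sketch

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/types"
	cdi "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// startImports creates every DataVolume first so that CDI imports all
// disks in parallel, then waits for each import to finish.
func startImports(ctx context.Context, c client.Client, dvs []*cdi.DataVolume) error {
	for _, dv := range dvs {
		if err := c.Create(ctx, dv); err != nil {
			return err
		}
	}
	for _, dv := range dvs {
		key := types.NamespacedName{Namespace: dv.Namespace, Name: dv.Name}
		for {
			latest := &cdi.DataVolume{}
			if err := c.Get(ctx, key, latest); err != nil {
				return err
			}
			if latest.Status.Phase == cdi.Succeeded {
				break
			}
			if latest.Status.Phase == cdi.Failed {
				return fmt.Errorf("import of %s failed", dv.Name)
			}
			// Simplification: a real controller reconciles on watch events.
			time.Sleep(5 * time.Second)
		}
	}
	return nil
}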

Fix:
MTV already uses the CNV CDI for warm and remote migrations. We just
need to adjust the code to remove the virt-v2v transfer and rely on the
CNV CDI to do it for us.
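
For reference, a hedged, self-contained sketch of the kind of DataVolume
we hand to CDI. The VDDK field names match the builder change below;
every value (names, URL, thumbprint, images, storage class) is a
placeholder, and the capacity request is elided for brevity.

package sketch

import (
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	cdi "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

// exampleDataVolume shows the shape of a CDI import from vSphere via
// VDDK. The real builder fills these fields from the plan, the provider
// secret, and the source VM (see builder.go below).
func exampleDataVolume() *cdi.DataVolume {
	storageClass := "standard" // hypothetical storage class
	return &cdi.DataVolume{
		ObjectMeta: meta.ObjectMeta{
			Name:      "vm1-disk-0",
			Namespace: "demo",
		},
		Spec: cdi.DataVolumeSpec{
			Source: &cdi.DataVolumeSource{
				VDDK: &cdi.DataVolumeSourceVDDK{
					BackingFile:  "[datastore1] vm1/vm1.vmdk",
					UUID:         "42078a5e-0000-0000-0000-000000000000",
					URL:          "https://vcenter.example.com/sdk",
					SecretRef:    "vm1-vddk-credentials",
					Thumbprint:   "AA:BB:CC:DD:EE:FF:00:11:22:33:44:55:66:77:88:99:AA:BB:CC:DD",
					InitImageURL: "quay.io/example/vddk:latest",
				},
			},
			Storage: &cdi.StorageSpec{
				StorageClassName: &storageClass,
				// capacity request elided for brevity
			},
		},
	}
}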

Drawbacks:
- CNV CDI *requires* the VDDK, which until now was only highly recommended.
- CNV CDI is not maintained inside MTV, and there might be problems
  escalating and backporting patches as CNV has a different release
  cycle.
- Because we will be migrating all disks in parallel, we need to
  optimise our migration scheduler so we don't take up too many
  host/network resources. I have already done some optimisations in
  [4,5,6]; the sketch after this list illustrates the idea.
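
The actual scheduler work is in [4,5,6]; purely to illustrate the idea,
a hypothetical helper (not MTV code) that bounds how many transfers run
at once:

package sketch

import (
	"context"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

// transferAll starts every disk transfer but lets at most maxParallel
// of them run concurrently, so parallel CDI imports cannot saturate the
// host or the network.
func transferAll(ctx context.Context, disks []string, maxParallel int64,
	transfer func(context.Context, string) error) error {
	sem := semaphore.NewWeighted(maxParallel)
	g, ctx := errgroup.WithContext(ctx)
	for _, disk := range disks {
		disk := disk // capture the loop variable for the goroutine
		g.Go(func() error {
			if err := sem.Acquire(ctx, 1); err != nil {
				return err
			}
			defer sem.Release(1)
			return transfer(ctx, disk)
		})
	}
	return g.Wait()
}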

Notes:
This change removes the usage of virt-v2v for the disk transfer; from
now on we will only use virt-v2v-in-place.

Ref:
[1] https://issues.redhat.com/browse/MTV-1536
[2] https://issues.redhat.com/browse/MTV-1581
[3] https://access.redhat.com/articles/1351473
[4] kubev2v#1088
[5] kubev2v#1087
[6] kubev2v#1086

Signed-off-by: Martin Necas <[email protected]>
mnecas committed Oct 16, 2024
1 parent bc87852 commit 40b3f51
Showing 12 changed files with 30 additions and 452 deletions.
19 changes: 0 additions & 19 deletions cmd/virt-v2v-monitor/BUILD.bazel

This file was deleted.

127 changes: 0 additions & 127 deletions cmd/virt-v2v-monitor/virt-v2v-monitor.go

This file was deleted.

25 changes: 0 additions & 25 deletions pkg/apis/forklift/v1beta1/plan.go
@@ -21,7 +21,6 @@ import (
 	"github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/provider"
 	"github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/ref"
 	libcnd "github.com/konveyor/forklift-controller/pkg/lib/condition"
-	liberr "github.com/konveyor/forklift-controller/pkg/lib/error"
 	core "k8s.io/api/core/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -93,30 +92,6 @@ type Plan struct {
 	Referenced `json:"-"`
 }
 
-// If the plan calls for the vm to be cold migrated to the local cluster, we can
-// just use virt-v2v directly to convert the vm while copying data over. In other
-// cases, we use CDI to transfer disks to the destination cluster and then use
-// virt-v2v-in-place to convert these disks after cutover.
-func (p *Plan) VSphereColdLocal() (bool, error) {
-	source := p.Referenced.Provider.Source
-	if source == nil {
-		return false, liberr.New("Cannot analyze plan, source provider is missing.")
-	}
-	destination := p.Referenced.Provider.Destination
-	if destination == nil {
-		return false, liberr.New("Cannot analyze plan, destination provider is missing.")
-	}
-
-	switch source.Type() {
-	case VSphere:
-		return !p.Spec.Warm && destination.IsHost(), nil
-	case Ova:
-		return true, nil
-	default:
-		return false, nil
-	}
-}
-
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 type PlanList struct {
 	meta.TypeMeta `json:",inline"`
32 changes: 10 additions & 22 deletions pkg/controller/plan/adapter/vsphere/builder.go
@@ -434,28 +434,16 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config
 			if disk.Datastore.ID == ds.ID {
 				storageClass := mapped.Destination.StorageClass
 				var dvSource cdi.DataVolumeSource
-				coldLocal, vErr := r.Context.Plan.VSphereColdLocal()
-				if vErr != nil {
-					err = vErr
-					return
-				}
-				if coldLocal {
-					// Let virt-v2v do the copying
-					dvSource = cdi.DataVolumeSource{
-						Blank: &cdi.DataVolumeBlankImage{},
-					}
-				} else {
-					// Let CDI do the copying
-					dvSource = cdi.DataVolumeSource{
-						VDDK: &cdi.DataVolumeSourceVDDK{
-							BackingFile:  r.baseVolume(disk.File),
-							UUID:         vm.UUID,
-							URL:          url,
-							SecretRef:    secret.Name,
-							Thumbprint:   thumbprint,
-							InitImageURL: r.Source.Provider.Spec.Settings[api.VDDK],
-						},
-					}
+				// Let CDI do the copying
+				dvSource = cdi.DataVolumeSource{
+					VDDK: &cdi.DataVolumeSourceVDDK{
+						BackingFile:  r.baseVolume(disk.File),
+						UUID:         vm.UUID,
+						URL:          url,
+						SecretRef:    secret.Name,
+						Thumbprint:   thumbprint,
+						InitImageURL: r.Source.Provider.Spec.Settings[api.VDDK],
+					},
 				}
 				dvSpec := cdi.DataVolumeSpec{
 					Source: &dvSource,
7 changes: 0 additions & 7 deletions pkg/controller/plan/adapter/vsphere/client.go
@@ -277,13 +277,6 @@ func (r *Client) getChangeIds(vmRef ref.Ref, snapshotId string, hosts util.Hosts
 }
 
 func (r *Client) getClient(vm *model.VM, hosts util.HostsFunc) (client *vim25.Client, err error) {
-	if coldLocal, vErr := r.Plan.VSphereColdLocal(); vErr == nil && coldLocal {
-		// when virt-v2v runs the migration, forklift-controller should interact only
-		// with the component that serves the SDK endpoint of the provider
-		client = r.client.Client
-		return
-	}
-
 	if r.Source.Provider.Spec.Settings[v1beta1.SDK] == v1beta1.ESXI {
 		// when migrating from ESXi host, we use the client of the SDK endpoint of the provider,
 		// there's no need in a different client (the ESXi host is the only component involved in the migration)
52 changes: 4 additions & 48 deletions pkg/controller/plan/kubevirt.go
@@ -824,18 +824,12 @@ func (r *KubeVirt) getListOptionsNamespaced() (listOptions *client.ListOptions)
 
 // Ensure the guest conversion (virt-v2v) pod exists on the destination.
 func (r *KubeVirt) EnsureGuestConversionPod(vm *plan.VMStatus, vmCr *VirtualMachine, pvcs []*core.PersistentVolumeClaim) (err error) {
-	labels := r.vmLabels(vm.Ref)
-	v2vSecret, err := r.ensureSecret(vm.Ref, r.secretDataSetterForCDI(vm.Ref), labels)
-	if err != nil {
-		return
-	}
-
 	configMap, err := r.ensureLibvirtConfigMap(vm.Ref, vmCr, pvcs)
 	if err != nil {
 		return
 	}
 
-	newPod, err := r.guestConversionPod(vm, vmCr.Spec.Template.Spec.Volumes, configMap, pvcs, v2vSecret)
+	newPod, err := r.guestConversionPod(vm, vmCr.Spec.Template.Spec.Volumes, configMap, pvcs)
 	if err != nil {
 		return
 	}
@@ -1667,7 +1661,7 @@ func (r *KubeVirt) findTemplate(vm *plan.VMStatus) (tmpl *template.Template, err
 	return
 }
 
-func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, v2vSecret *core.Secret) (pod *core.Pod, err error) {
+func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim) (pod *core.Pod, err error) {
 	volumes, volumeMounts, volumeDevices, err := r.podVolumeMounts(vmVolumes, configMap, pvcs, vm)
 	if err != nil {
 		return
@@ -1684,34 +1678,6 @@ func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume,
 	user := qemuUser
 	nonRoot := true
 	allowPrivilageEscalation := false
-	// virt-v2v image
-	coldLocal, vErr := r.Context.Plan.VSphereColdLocal()
-	if vErr != nil {
-		err = vErr
-		return
-	}
-	if coldLocal {
-		// mount the secret for the password and CA certificate
-		volumes = append(volumes, core.Volume{
-			Name: "secret-volume",
-			VolumeSource: core.VolumeSource{
-				Secret: &core.SecretVolumeSource{
-					SecretName: v2vSecret.Name,
-				},
-			},
-		})
-		volumeMounts = append(volumeMounts, core.VolumeMount{
-			Name:      "secret-volume",
-			ReadOnly:  true,
-			MountPath: "/etc/secret",
-		})
-	} else {
-		environment = append(environment,
-			core.EnvVar{
-				Name:  "V2V_inPlace",
-				Value: "1",
-			})
-	}
 	// VDDK image
 	var initContainers []core.Container
 	if vddkImage, found := r.Source.Provider.Spec.Settings[api.VDDK]; found {
@@ -1805,18 +1771,8 @@ func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume,
 		InitContainers: initContainers,
 		Containers: []core.Container{
 			{
-				Name: "virt-v2v",
-				Env:  environment,
-				EnvFrom: []core.EnvFromSource{
-					{
-						Prefix: "V2V_",
-						SecretRef: &core.SecretEnvSource{
-							LocalObjectReference: core.LocalObjectReference{
-								Name: v2vSecret.Name,
-							},
-						},
-					},
-				},
+				Name:          "virt-v2v",
+				Env:           environment,
 				Image:         Settings.Migration.VirtV2vImage,
 				VolumeMounts:  volumeMounts,
 				VolumeDevices: volumeDevices,
