MTV-1804 | Implement VDDK AIO buffer configuration
Issue:
The scale and performance team found a way to improve transfer speeds.
Right now the only way to enable this feature is to set the v2v extra
vars, which pass the configuration to virt-v2v and virt-v2v-in-place.
The v2v extra vars configuration is general, not VDDK-specific. This
causes warm migration, which uses virt-v2v-in-place, to fail, because
virt-v2v-in-place does not use any VDDK parameters; those parameters
should be passed to the CNV CDI instead.

Fix:
Add a way to easily enable and configure the VDDK AIO optimization.
This feature is VDDK- and provider-specific, as it requires particular
vSphere and VDDK versions, so it can't be enabled globally or by
default. This PR adds the configuration to the Provider spec settings,
creates a ConfigMap with the necessary configuration, and either mounts
the ConfigMap into the guest conversion pod for cold migration or
passes the ConfigMap name in the CDI DataVolume annotation.

Example:
```
apiVersion: forklift.konveyor.io/v1beta1
kind: Provider
metadata:
  name: vsphere
  namespace: forklift
spec:
  settings:
    sdkEndpoint: vcenter
    useVddkAioOptimization: 'true'
    vddkAioBufSize: 16 # optional, defaults to 16
    vddkAioBufCount: 4 # optional, defaults to 4
    vddkInitImage: 'quay.io/xiaodwan/vddk:8'
  type: vsphere
```
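
For reference, the controller renders these settings into a ConfigMap named `<plan-name>-vddk-conf` in the target namespace. A sketch of the generated object with the defaults above (the plan name `my-plan` is hypothetical; the inherited plan labels are omitted):
```
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-plan-vddk-conf
  labels:
    vddk-conf: vddk-conf
data:
  vddk-config-file: |
    VixDiskLib.nfcAio.Session.BufSizeIn64K=16
    VixDiskLib.nfcAio.Session.BufCount=4
```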

Ref:
- https://issues.redhat.com/browse/MTV-1804
- kubevirt/containerized-data-importer#3572
- https://docs.redhat.com/en/documentation/migration_toolkit_for_virtualization/2.7/html-single/installing_and_using_the_migration_toolkit_for_virtualization/index#mtv-aio-buffer_mtv

Signed-off-by: Martin Necas <[email protected]>
mnecas committed Dec 18, 2024
1 parent 5bb3c1c commit 9b17fb9
Showing 6 changed files with 186 additions and 21 deletions.
25 changes: 21 additions & 4 deletions pkg/apis/forklift/v1beta1/provider.go
@@ -20,6 +20,7 @@ import (
libcnd "github.com/konveyor/forklift-controller/pkg/lib/condition"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"strconv"
)

type ProviderType string
@@ -59,10 +60,13 @@ const (

// Provider settings.
const (
VDDK = "vddkInitImage"
SDK = "sdkEndpoint"
VCenter = "vcenter"
ESXI = "esxi"
VDDK = "vddkInitImage"
SDK = "sdkEndpoint"
VCenter = "vcenter"
ESXI = "esxi"
UseVddkAioOptimization = "useVddkAioOptimization"
VddkAioBufSize = "vddkAioBufSize"
VddkAioBufCount = "vddkAioBufCount"
)

const OvaProviderFinalizer = "forklift/ova-provider"
@@ -147,3 +151,16 @@ func (p *Provider) HasReconciled() bool {
func (p *Provider) RequiresConversion() bool {
return p.Type() == VSphere || p.Type() == Ova
}

// UseVddkAioOptimization reports whether the VDDK AIO optimization is enabled in the provider settings.
func (p *Provider) UseVddkAioOptimization() bool {
useVddkAioOptimization := p.Spec.Settings[UseVddkAioOptimization]
if useVddkAioOptimization == "" {
return false
}
parseBool, err := strconv.ParseBool(useVddkAioOptimization)
if err != nil {
return false
}
return parseBool
}
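
As a quick illustration of the parsing behavior above — a minimal table-driven test sketch, assuming `ProviderSpec.Settings` is a plain `map[string]string` (the test itself is not part of this commit):
```
package v1beta1

import "testing"

// Anything strconv.ParseBool rejects falls back to false (disabled).
func TestUseVddkAioOptimization(t *testing.T) {
	cases := map[string]bool{
		"true":  true,  // canonical value from the Provider spec example
		"1":     true,  // strconv.ParseBool also accepts 1/t/T/TRUE/...
		"false": false,
		"":      false, // setting not present
		"yes":   false, // not a ParseBool value, treated as disabled
	}
	for in, want := range cases {
		p := Provider{Spec: ProviderSpec{Settings: map[string]string{UseVddkAioOptimization: in}}}
		if got := p.UseVddkAioOptimization(); got != want {
			t.Errorf("%q: got %v, want %v", in, got, want)
		}
	}
}
```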
4 changes: 4 additions & 0 deletions pkg/controller/plan/adapter/base/doc.go
@@ -34,6 +34,10 @@ const (

// DV immediate bind to WaitForFirstConsumer storage class
AnnBindImmediate = "cdi.kubevirt.io/storage.bind.immediate.requested"

// Annotation specifying an extra VDDK ConfigMap; Forklift uses it to pass the AIO configuration to VDDK.
// Related to https://github.com/kubevirt/containerized-data-importer/pull/3572
AnnVddkExtraArgs = "cdi.kubevirt.io/storage.pod.vddk.extraargs"
)

var VolumePopulatorNotSupportedError = liberr.New("provider does not support volume populators")
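
To illustrate the new annotation, a DataVolume produced for a CDI-driven import would reference the ConfigMap roughly like this (both names below are hypothetical):
```
apiVersion: cdi.kubevirt.io/v1beta1
kind: DataVolume
metadata:
  name: my-plan-vm-1-disk-0
  annotations:
    cdi.kubevirt.io/storage.pod.vddk.extraargs: my-plan-vddk-conf
```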
9 changes: 9 additions & 0 deletions pkg/controller/plan/adapter/vsphere/builder.go
@@ -81,6 +81,8 @@ const (
AnnImportBackingFile = "cdi.kubevirt.io/storage.import.backingFile"
)

const VddkConf = "vddk-conf"

// Map of vmware guest ids to osinfo ids.
var osMap = map[string]string{
"centos64Guest": "centos5.11",
@@ -144,6 +146,10 @@ type Builder struct {
macConflictsMap map[string]string
}

func genVddkConfConfigMapName(plan *api.Plan) string {
return fmt.Sprintf("%s-%s", plan.Name, VddkConf)
}

// Get list of destination VMs with mac addresses that would
// conflict with this VM, if any exist.
func (r *Builder) macConflicts(vm *model.VM) (conflictingVMs []string, err error) {
@@ -483,6 +489,9 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config
dv.ObjectMeta.Annotations = make(map[string]string)
}
dv.ObjectMeta.Annotations[planbase.AnnDiskSource] = r.baseVolume(disk.File)
if !coldLocal && r.Source.Provider.UseVddkAioOptimization() {
dv.ObjectMeta.Annotations[planbase.AnnVddkExtraArgs] = genVddkConfConfigMapName(r.Plan)
}
dvs = append(dvs, *dv)
}
}
141 changes: 134 additions & 7 deletions pkg/controller/plan/kubevirt.go
@@ -111,7 +111,14 @@ const (
OvaPVLabel = "nfs-pv"
)

const ExtraV2vConf = "extra-v2v-conf"
// VDDK v2v configuration.
const (
ExtraV2vConf = "extra-v2v-conf"
VddkConf = "vddk-conf"

VddkAioBufSizeDefault = "16"
VddkAioBufCountDefault = "4"
)

// Map of VirtualMachines keyed by vmID.
type VirtualMachineMap map[string]VirtualMachine
@@ -242,6 +249,10 @@ func genExtraV2vConfConfigMapName(plan *api.Plan) string {
return fmt.Sprintf("%s-%s", plan.Name, ExtraV2vConf)
}

func genVddkConfConfigMapName(plan *api.Plan) string {
return fmt.Sprintf("%s-%s", plan.Name, VddkConf)
}

// Get the importer pod for a PersistentVolumeClaim.
func (r *KubeVirt) GetImporterPod(pvc core.PersistentVolumeClaim) (pod *core.Pod, found bool, err error) {
pod = &core.Pod{}
@@ -583,6 +594,12 @@ func (r *KubeVirt) DataVolumes(vm *plan.VMStatus) (dataVolumes []cdi.DataVolume,
if err != nil {
return
}
if r.Source.Provider.UseVddkAioOptimization() {
_, err = r.ensureVddkConfigMap()
if err != nil {
return nil, err
}
}

dataVolumes, err = r.dataVolumes(vm, secret, configMap)
if err != nil {
@@ -641,6 +658,83 @@ func (r *KubeVirt) EnsureDataVolumes(vm *plan.VMStatus, dataVolumes []cdi.DataVo
return
}

func (r *KubeVirt) vddkConfigMap(labels map[string]string) (*core.ConfigMap, error) {
data := make(map[string]string)
if r.Source.Provider.UseVddkAioOptimization() {
buffSize := r.Source.Provider.Spec.Settings[api.VddkAioBufSize]
if buffSize == "" {
buffSize = VddkAioBufSizeDefault
}
buffCount := r.Source.Provider.Spec.Settings[api.VddkAioBufCount]
if buffCount == "" {
buffCount = VddkAioBufCountDefault
}
data["vddk-config-file"] = fmt.Sprintf(
"VixDiskLib.nfcAio.Session.BufSizeIn64K=%s\n"+
"VixDiskLib.nfcAio.Session.BufCount=%s", buffSize, buffCount)
}
configMap := core.ConfigMap{
Data: data,
ObjectMeta: metav1.ObjectMeta{
Name: genVddkConfConfigMapName(r.Plan),
Namespace: r.Plan.Spec.TargetNamespace,
Labels: labels,
},
}
return &configMap, nil
}

func (r *KubeVirt) ensureVddkConfigMap() (configMap *core.ConfigMap, err error) {
labels := r.vddkLabels()
newConfigMap, err := r.vddkConfigMap(labels)
if err != nil {
return
}

list := &core.ConfigMapList{}
err = r.Destination.Client.List(
context.TODO(),
list,
&client.ListOptions{
LabelSelector: k8slabels.SelectorFromSet(labels),
Namespace: r.Plan.Spec.TargetNamespace,
},
)
if err != nil {
err = liberr.Wrap(err)
return
}
if len(list.Items) > 0 {
configMap = &list.Items[0]
configMap.Data = newConfigMap.Data
err = r.Destination.Client.Update(context.TODO(), configMap)
if err != nil {
err = liberr.Wrap(err)
return
}
r.Log.V(1).Info(
"VDDK extra args configmap updated.",
"configmap",
path.Join(
configMap.Namespace,
configMap.Name))
} else {
configMap = newConfigMap
err = r.Destination.Client.Create(context.TODO(), configMap)
if err != nil {
err = liberr.Wrap(err)
return
}
r.Log.V(1).Info(
"VDDK extra args configmap created.",
"configmap",
path.Join(
configMap.Namespace,
configMap.Name))
}
return
}

func (r *KubeVirt) EnsurePopulatorVolumes(vm *plan.VMStatus, pvcs []*core.PersistentVolumeClaim) (err error) {
var pendingPvcNames []string
for _, pvc := range pvcs {
@@ -850,12 +944,17 @@ func (r *KubeVirt) EnsureGuestConversionPod(vm *plan.VMStatus, vmCr *VirtualMach
return
}

configMap, err := r.ensureLibvirtConfigMap(vm.Ref, vmCr, pvcs)
libvirtConfigMap, err := r.ensureLibvirtConfigMap(vm.Ref, vmCr, pvcs)
if err != nil {
return
}

vddkConfigmap, err := r.ensureVddkConfigMap()
if err != nil {
return
}

newPod, err := r.guestConversionPod(vm, vmCr.Spec.Template.Spec.Volumes, configMap, pvcs, v2vSecret)
newPod, err := r.guestConversionPod(vm, vmCr.Spec.Template.Spec.Volumes, libvirtConfigMap, vddkConfigmap, pvcs, v2vSecret)
if err != nil {
return
}
@@ -1690,8 +1789,8 @@ func (r *KubeVirt) findTemplate(vm *plan.VMStatus) (tmpl *template.Template, err
return
}

func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, v2vSecret *core.Secret) (pod *core.Pod, err error) {
volumes, volumeMounts, volumeDevices, err := r.podVolumeMounts(vmVolumes, configMap, pvcs, vm)
func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, libvirtConfigMap *core.ConfigMap, vddkConfigmap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, v2vSecret *core.Secret) (pod *core.Pod, err error) {
volumes, volumeMounts, volumeDevices, err := r.podVolumeMounts(vmVolumes, libvirtConfigMap, vddkConfigmap, pvcs, vm)
if err != nil {
return
}
@@ -1892,7 +1991,7 @@ func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume,
return
}

func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, vm *plan.VMStatus) (volumes []core.Volume, mounts []core.VolumeMount, devices []core.VolumeDevice, err error) {
func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, libvirtConfigMap *core.ConfigMap, vddkConfigmap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, vm *plan.VMStatus) (volumes []core.Volume, mounts []core.VolumeMount, devices []core.VolumeDevice, err error) {
pvcsByName := make(map[string]*core.PersistentVolumeClaim)
for _, pvc := range pvcs {
pvcsByName[pvc.Name] = pvc
@@ -1930,7 +2029,7 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi
VolumeSource: core.VolumeSource{
ConfigMap: &core.ConfigMapVolumeSource{
LocalObjectReference: core.LocalObjectReference{
Name: configMap.Name,
Name: libvirtConfigMap.Name,
},
},
},
@@ -1949,6 +2048,19 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi
},
})
}
useVddkConf := r.Source.Provider.UseVddkAioOptimization()
if useVddkConf {
volumes = append(volumes, core.Volume{
Name: VddkConf,
VolumeSource: core.VolumeSource{
ConfigMap: &core.ConfigMapVolumeSource{
LocalObjectReference: core.LocalObjectReference{
Name: vddkConfigmap.Name,
},
},
},
})
}

switch r.Source.Provider.Type() {
case api.Ova:
@@ -2006,6 +2118,14 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi
},
)
}
if useVddkConf {
mounts = append(mounts,
core.VolumeMount{
Name: VddkConf,
MountPath: fmt.Sprintf("/mnt/%s", VddkConf),
},
)
}
}

_, exists, err := r.findConfigMapInNamespace(Settings.VirtCustomizeConfigMap, r.Plan.Spec.TargetNamespace)
Expand Down Expand Up @@ -2389,6 +2509,13 @@ func (r *KubeVirt) vmLabels(vmRef ref.Ref) (labels map[string]string) {
return
}

// Labels for the VDDK ConfigMap on a plan.
func (r *KubeVirt) vddkLabels() (labels map[string]string) {
labels = r.planLabels()
labels[VddkConf] = VddkConf
return
}

// Labels for a VM on a plan without migration label.
func (r *KubeVirt) vmAllButMigrationLabels(vmRef ref.Ref) (labels map[string]string) {
labels = r.vmLabels(vmRef)
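
Taken together, the conversion-pod wiring added above amounts to the following pod-spec fragment, sketched with a hypothetical plan name and container name:
```
volumes:
- name: vddk-conf
  configMap:
    name: my-plan-vddk-conf
containers:
- name: virt-v2v
  volumeMounts:
  - name: vddk-conf
    mountPath: /mnt/vddk-conf
```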
11 changes: 9 additions & 2 deletions virt-v2v/cmd/entrypoint.go
@@ -3,6 +3,7 @@ package main
import (
_ "embed"
"encoding/json"
"errors"
"fmt"
"io"
"os"
@@ -144,12 +145,18 @@ func virtV2vVsphereArgs() (args []string, err error) {
if err != nil {
return nil, err
}
if info, err := os.Stat(global.VDDK); err == nil && info.IsDir() {
if info, err := os.Stat(global.VDDK_LIB); err == nil && info.IsDir() {
args = append(args,
"-it", "vddk",
"-io", fmt.Sprintf("vddk-libdir=%s", global.VDDK),
"-io", fmt.Sprintf("vddk-libdir=%s", global.VDDK_LIB),
"-io", fmt.Sprintf("vddk-thumbprint=%s", os.Getenv("V2V_fingerprint")),
)
// Use the VDDK config file when it exists, but still allow the extra args to override the vddk-config for testing
if _, err := os.Stat(global.VDDK_CONF_FILE); !errors.Is(err, os.ErrNotExist) && os.Getenv("V2V_extra_args") == "" {
args = append(args,
"-io", fmt.Sprintf("vddk-config=%s", global.VDDK_CONF_FILE),
)
}
}

// When converting a VM whose name does not meet the DNS1123 RFC requirements,
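
Put together, when the VDDK library directory and the mounted config file are both present (and no extra args override it), the vSphere invocation gains a vddk-config option; an illustrative argument fragment, with the thumbprint as a placeholder:
```
-it vddk \
-io vddk-libdir=/opt/vmware-vix-disklib-distrib \
-io vddk-thumbprint=<V2V_fingerprint> \
-io vddk-config=/mnt/vddk-conf/vddk-config-file
```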
17 changes: 9 additions & 8 deletions virt-v2v/pkg/global/variables.go
@@ -3,14 +3,15 @@ package global
type MountPath string

const (
OVA = "ova"
VSPHERE = "vSphere"
DIR = "/var/tmp/v2v"
INSPECTION = "/var/tmp/v2v/inspection.xml"
FS MountPath = "/mnt/disks/disk[0-9]*"
BLOCK MountPath = "/dev/block[0-9]*"
VDDK = "/opt/vmware-vix-disklib-distrib"
LUKSDIR = "/etc/luks"
OVA = "ova"
VSPHERE = "vSphere"
DIR = "/var/tmp/v2v"
INSPECTION = "/var/tmp/v2v/inspection.xml"
FS MountPath = "/mnt/disks/disk[0-9]*"
BLOCK MountPath = "/dev/block[0-9]*"
VDDK_LIB = "/opt/vmware-vix-disklib-distrib"
LUKSDIR = "/etc/luks"
VDDK_CONF_FILE = "/mnt/vddk-conf/vddk-config-file"

WIN_FIRSTBOOT_PATH = "/Program Files/Guestfs/Firstboot"
WIN_FIRSTBOOT_SCRIPTS_PATH = "/Program Files/Guestfs/Firstboot/scripts"
