diff --git a/pkg/apis/forklift/v1beta1/provider.go b/pkg/apis/forklift/v1beta1/provider.go
index 7227d58e4..b5e06a7f3 100644
--- a/pkg/apis/forklift/v1beta1/provider.go
+++ b/pkg/apis/forklift/v1beta1/provider.go
@@ -20,6 +20,7 @@ import (
 	libcnd "github.com/konveyor/forklift-controller/pkg/lib/condition"
 	core "k8s.io/api/core/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"strconv"
 )
 
 type ProviderType string
@@ -59,10 +60,13 @@ const (
 
 // Provider settings.
 const (
-	VDDK    = "vddkInitImage"
-	SDK     = "sdkEndpoint"
-	VCenter = "vcenter"
-	ESXI    = "esxi"
+	VDDK                   = "vddkInitImage"
+	SDK                    = "sdkEndpoint"
+	VCenter                = "vcenter"
+	ESXI                   = "esxi"
+	UseVddkAioOptimization = "useVddkAioOptimization"
+	VddkAioBufSize         = "vddkAioBufSize"
+	VddkAioBufCount        = "vddkAioBufCount"
 )
 
 const OvaProviderFinalizer = "forklift/ova-provider"
@@ -147,3 +151,16 @@ func (p *Provider) HasReconciled() bool {
 func (p *Provider) RequiresConversion() bool {
 	return p.Type() == VSphere || p.Type() == Ova
 }
+
+// UseVddkAioOptimization reports whether the provider has VDDK AIO optimization enabled in its settings.
+func (p *Provider) UseVddkAioOptimization() bool {
+	useVddkAioOptimization := p.Spec.Settings[UseVddkAioOptimization]
+	if useVddkAioOptimization == "" {
+		return false
+	}
+	enabled, err := strconv.ParseBool(useVddkAioOptimization)
+	if err != nil {
+		return false
+	}
+	return enabled
+}
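For reference, a minimal sketch (not part of the patch) of how the new `useVddkAioOptimization` setting is interpreted: `strconv.ParseBool` accepts only 1/t/T/TRUE/true/True and their false counterparts, so an unset key or any other value leaves the optimization disabled. The sample values below are illustrative, not from the source.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Candidate values for Spec.Settings["useVddkAioOptimization"]; "yes" and ""
	// fail to parse, and UseVddkAioOptimization treats a parse error as false.
	for _, v := range []string{"true", "1", "False", "yes", ""} {
		enabled, err := strconv.ParseBool(v)
		fmt.Printf("%q -> enabled=%v err=%v\n", v, err == nil && enabled, err)
	}
}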
diff --git a/pkg/controller/plan/adapter/base/doc.go b/pkg/controller/plan/adapter/base/doc.go
index b57ae58ef..27393dacc 100644
--- a/pkg/controller/plan/adapter/base/doc.go
+++ b/pkg/controller/plan/adapter/base/doc.go
@@ -34,6 +34,10 @@ const (
 
 	// DV immediate bind to WaitForFirstConsumer storage class
 	AnnBindImmediate = "cdi.kubevirt.io/storage.bind.immediate.requested"
+
+	// References a ConfigMap with extra VDDK arguments; Forklift uses it to pass AIO configuration to the VDDK.
+	// Related to https://github.com/kubevirt/containerized-data-importer/pull/3572
+	AnnVddkExtraArgs = "cdi.kubevirt.io/storage.pod.vddk.extraargs"
 )
 
 var VolumePopulatorNotSupportedError = liberr.New("provider does not support volume populators")
diff --git a/pkg/controller/plan/adapter/vsphere/builder.go b/pkg/controller/plan/adapter/vsphere/builder.go
index c68e7f18a..5f89bdd74 100644
--- a/pkg/controller/plan/adapter/vsphere/builder.go
+++ b/pkg/controller/plan/adapter/vsphere/builder.go
@@ -81,6 +81,8 @@ const (
 	AnnImportBackingFile = "cdi.kubevirt.io/storage.import.backingFile"
 )
 
+const VddkConf = "vddk-conf"
+
 // Map of vmware guest ids to osinfo ids.
 var osMap = map[string]string{
 	"centos64Guest":              "centos5.11",
@@ -144,6 +146,10 @@ type Builder struct {
 	macConflictsMap map[string]string
 }
 
+func genVddkConfConfigMapName(plan *api.Plan) string {
+	return fmt.Sprintf("%s-%s", plan.Name, VddkConf)
+}
+
 // Get list of destination VMs with mac addresses that would
 // conflict with this VM, if any exist.
 func (r *Builder) macConflicts(vm *model.VM) (conflictingVMs []string, err error) {
@@ -483,6 +489,9 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config
 					dv.ObjectMeta.Annotations = make(map[string]string)
 				}
 				dv.ObjectMeta.Annotations[planbase.AnnDiskSource] = r.baseVolume(disk.File)
+				if !coldLocal && r.Source.Provider.UseVddkAioOptimization() {
+					dv.ObjectMeta.Annotations[planbase.AnnVddkExtraArgs] = genVddkConfConfigMapName(r.Plan)
+				}
 				dvs = append(dvs, *dv)
 			}
 		}
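To make the wiring concrete, a hypothetical sketch (plan name invented for illustration) of the annotation the builder sets on each DataVolume when the migration is not cold-local, pointing CDI at the `<plan>-vddk-conf` ConfigMap:

package main

import "fmt"

func main() {
	planName := "my-plan" // hypothetical plan name, for illustration only
	// Mirrors genVddkConfConfigMapName: the annotation value is "<plan>-vddk-conf".
	fmt.Printf("cdi.kubevirt.io/storage.pod.vddk.extraargs: %s-vddk-conf\n", planName)
}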
diff --git a/pkg/controller/plan/kubevirt.go b/pkg/controller/plan/kubevirt.go
index 06cb04e87..3091ef2cf 100644
--- a/pkg/controller/plan/kubevirt.go
+++ b/pkg/controller/plan/kubevirt.go
@@ -111,7 +111,14 @@ const (
 	OvaPVLabel  = "nfs-pv"
 )
 
-const ExtraV2vConf = "extra-v2v-conf"
+// VDDK / v2v configuration names and AIO buffer defaults.
+const (
+	ExtraV2vConf = "extra-v2v-conf"
+	VddkConf     = "vddk-conf"
+
+	VddkAioBufSizeDefault  = "16"
+	VddkAioBufCountDefault = "4"
+)
 
 // Map of VirtualMachines keyed by vmID.
 type VirtualMachineMap map[string]VirtualMachine
@@ -242,6 +249,10 @@ func genExtraV2vConfConfigMapName(plan *api.Plan) string {
 	return fmt.Sprintf("%s-%s", plan.Name, ExtraV2vConf)
 }
 
+func genVddkConfConfigMapName(plan *api.Plan) string {
+	return fmt.Sprintf("%s-%s", plan.Name, VddkConf)
+}
+
 // Get the importer pod for a PersistentVolumeClaim.
 func (r *KubeVirt) GetImporterPod(pvc core.PersistentVolumeClaim) (pod *core.Pod, found bool, err error) {
 	pod = &core.Pod{}
@@ -583,6 +594,12 @@ func (r *KubeVirt) DataVolumes(vm *plan.VMStatus) (dataVolumes []cdi.DataVolume,
 	if err != nil {
 		return
 	}
+	if r.Source.Provider.UseVddkAioOptimization() {
+		_, err = r.ensureVddkConfigMap()
+		if err != nil {
+			return nil, err
+		}
+	}
 
 	dataVolumes, err = r.dataVolumes(vm, secret, configMap)
 	if err != nil {
@@ -641,6 +658,83 @@ func (r *KubeVirt) EnsureDataVolumes(vm *plan.VMStatus, dataVolumes []cdi.DataVo
 	return
 }
 
+func (r *KubeVirt) vddkConfigMap(labels map[string]string) (*core.ConfigMap, error) {
+	data := make(map[string]string)
+	if r.Source.Provider.UseVddkAioOptimization() {
+		buffSize := r.Source.Provider.Spec.Settings[api.VddkAioBufSize]
+		if buffSize == "" {
+			buffSize = VddkAioBufSizeDefault
+		}
+		buffCount := r.Source.Provider.Spec.Settings[api.VddkAioBufCount]
+		if buffCount == "" {
+			buffCount = VddkAioBufCountDefault
+		}
+		data["vddk-config-file"] = fmt.Sprintf(
+			"VixDiskLib.nfcAio.Session.BufSizeIn64K=%s\n"+
+				"VixDiskLib.nfcAio.Session.BufCount=%s", buffSize, buffCount)
+	}
+	configMap := core.ConfigMap{
+		Data: data,
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      genVddkConfConfigMapName(r.Plan),
+			Namespace: r.Plan.Spec.TargetNamespace,
+			Labels:    labels,
+		},
+	}
+	return &configMap, nil
+}
+
+func (r *KubeVirt) ensureVddkConfigMap() (configMap *core.ConfigMap, err error) {
+	labels := r.vddkLabels()
+	newConfigMap, err := r.vddkConfigMap(labels)
+	if err != nil {
+		return
+	}
+
+	list := &core.ConfigMapList{}
+	err = r.Destination.Client.List(
+		context.TODO(),
+		list,
+		&client.ListOptions{
+			LabelSelector: k8slabels.SelectorFromSet(labels),
+			Namespace:     r.Plan.Spec.TargetNamespace,
+		},
+	)
+	if err != nil {
+		err = liberr.Wrap(err)
+		return
+	}
+	if len(list.Items) > 0 {
+		configMap = &list.Items[0]
+		configMap.Data = newConfigMap.Data
+		err = r.Destination.Client.Update(context.TODO(), configMap)
+		if err != nil {
+			err = liberr.Wrap(err)
+			return
+		}
+		r.Log.V(1).Info(
+			"VDDK extra args configmap updated.",
+			"configmap",
+			path.Join(
+				configMap.Namespace,
+				configMap.Name))
+	} else {
+		configMap = newConfigMap
+		err = r.Destination.Client.Create(context.TODO(), configMap)
+		if err != nil {
+			err = liberr.Wrap(err)
+			return
+		}
+		r.Log.V(1).Info(
+			"VDDK extra args configmap created.",
+			"configmap",
+			path.Join(
+				configMap.Namespace,
+				configMap.Name))
+	}
+	return
+}
+
 func (r *KubeVirt) EnsurePopulatorVolumes(vm *plan.VMStatus, pvcs []*core.PersistentVolumeClaim) (err error) {
 	var pendingPvcNames []string
 	for _, pvc := range pvcs {
@@ -850,12 +944,17 @@ func (r *KubeVirt) EnsureGuestConversionPod(vm *plan.VMStatus, vmCr *VirtualMach
 		return
 	}
 
-	configMap, err := r.ensureLibvirtConfigMap(vm.Ref, vmCr, pvcs)
+	libvirtConfigMap, err := r.ensureLibvirtConfigMap(vm.Ref, vmCr, pvcs)
+	if err != nil {
+		return
+	}
+
+	vddkConfigMap, err := r.ensureVddkConfigMap()
 	if err != nil {
 		return
 	}
 
-	newPod, err := r.guestConversionPod(vm, vmCr.Spec.Template.Spec.Volumes, configMap, pvcs, v2vSecret)
+	newPod, err := r.guestConversionPod(vm, vmCr.Spec.Template.Spec.Volumes, libvirtConfigMap, vddkConfigMap, pvcs, v2vSecret)
 	if err != nil {
 		return
 	}
@@ -1690,8 +1789,8 @@ func (r *KubeVirt) findTemplate(vm *plan.VMStatus) (tmpl *template.Template, err
 	return
 }
 
-func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, v2vSecret *core.Secret) (pod *core.Pod, err error) {
-	volumes, volumeMounts, volumeDevices, err := r.podVolumeMounts(vmVolumes, configMap, pvcs, vm)
+func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, libvirtConfigMap *core.ConfigMap, vddkConfigMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, v2vSecret *core.Secret) (pod *core.Pod, err error) {
+	volumes, volumeMounts, volumeDevices, err := r.podVolumeMounts(vmVolumes, libvirtConfigMap, vddkConfigMap, pvcs, vm)
 	if err != nil {
 		return
 	}
@@ -1892,7 +1991,7 @@ func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume,
 	return
 }
 
-func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, vm *plan.VMStatus) (volumes []core.Volume, mounts []core.VolumeMount, devices []core.VolumeDevice, err error) {
+func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, libvirtConfigMap *core.ConfigMap, vddkConfigMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, vm *plan.VMStatus) (volumes []core.Volume, mounts []core.VolumeMount, devices []core.VolumeDevice, err error) {
 	pvcsByName := make(map[string]*core.PersistentVolumeClaim)
 	for _, pvc := range pvcs {
 		pvcsByName[pvc.Name] = pvc
@@ -1930,7 +2029,7 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi
 		VolumeSource: core.VolumeSource{
 			ConfigMap: &core.ConfigMapVolumeSource{
 				LocalObjectReference: core.LocalObjectReference{
-					Name: configMap.Name,
+					Name: libvirtConfigMap.Name,
 				},
 			},
 		},
@@ -1949,6 +2048,19 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi
 			},
 		})
 	}
+	useVddkConf := r.Source.Provider.UseVddkAioOptimization()
+	if useVddkConf {
+		volumes = append(volumes, core.Volume{
+			Name: VddkConf,
+			VolumeSource: core.VolumeSource{
+				ConfigMap: &core.ConfigMapVolumeSource{
+					LocalObjectReference: core.LocalObjectReference{
+						Name: vddkConfigMap.Name,
+					},
+				},
+			},
+		})
+	}
 
 	switch r.Source.Provider.Type() {
 	case api.Ova:
@@ -2006,6 +2118,14 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi
 				},
 			)
 		}
+		if useVddkConf {
+			mounts = append(mounts,
+				core.VolumeMount{
+					Name:      VddkConf,
+					MountPath: fmt.Sprintf("/mnt/%s", VddkConf),
+				},
+			)
+		}
 	}
 
 	_, exists, err := r.findConfigMapInNamespace(Settings.VirtCustomizeConfigMap, r.Plan.Spec.TargetNamespace)
@@ -2389,6 +2509,13 @@ func (r *KubeVirt) vmLabels(vmRef ref.Ref) (labels map[string]string) {
 	return
 }
 
+// Labels for the plan's VDDK configmap.
+func (r *KubeVirt) vddkLabels() (labels map[string]string) {
+	labels = r.planLabels()
+	labels[VddkConf] = VddkConf
+	return
+}
+
 // Labels for a VM on a plan without migration label.
 func (r *KubeVirt) vmAllButMigrationLabels(vmRef ref.Ref) (labels map[string]string) {
 	labels = r.vmLabels(vmRef)
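A minimal sketch (not part of the patch) of what vddkConfigMap renders into the ConfigMap's `vddk-config-file` key, assuming a provider that enables the optimization but sets neither buffer setting, so the defaults apply:

package main

import "fmt"

func main() {
	// Defaults from kubevirt.go; Spec.Settings["vddkAioBufSize"] and
	// Spec.Settings["vddkAioBufCount"] override them when set.
	bufSize, bufCount := "16", "4"
	fmt.Printf(
		"VixDiskLib.nfcAio.Session.BufSizeIn64K=%s\n"+
			"VixDiskLib.nfcAio.Session.BufCount=%s\n", bufSize, bufCount)
}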
diff --git a/virt-v2v/cmd/entrypoint.go b/virt-v2v/cmd/entrypoint.go
index ca9b34a58..9ab75e883 100644
--- a/virt-v2v/cmd/entrypoint.go
+++ b/virt-v2v/cmd/entrypoint.go
@@ -3,6 +3,7 @@ package main
 import (
 	_ "embed"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -144,12 +145,18 @@ func virtV2vVsphereArgs() (args []string, err error) {
 	if err != nil {
 		return nil, err
 	}
-	if info, err := os.Stat(global.VDDK); err == nil && info.IsDir() {
+	if info, err := os.Stat(global.VDDK_LIB); err == nil && info.IsDir() {
 		args = append(args,
 			"-it", "vddk",
-			"-io", fmt.Sprintf("vddk-libdir=%s", global.VDDK),
+			"-io", fmt.Sprintf("vddk-libdir=%s", global.VDDK_LIB),
 			"-io", fmt.Sprintf("vddk-thumbprint=%s", os.Getenv("V2V_fingerprint")),
 		)
+		// Pass the VDDK config file when it exists, unless V2V_extra_args is set, so the extra args can override the vddk-config for testing.
+		if _, err := os.Stat(global.VDDK_CONF_FILE); !errors.Is(err, os.ErrNotExist) && os.Getenv("V2V_extra_args") == "" {
+			args = append(args,
+				"-io", fmt.Sprintf("vddk-config=%s", global.VDDK_CONF_FILE),
+			)
+		}
 	}
 
 	// When converting VM with name that do not meet DNS1123 RFC requirements,
diff --git a/virt-v2v/pkg/global/variables.go b/virt-v2v/pkg/global/variables.go
index 8d9c589a2..c930611e0 100644
--- a/virt-v2v/pkg/global/variables.go
+++ b/virt-v2v/pkg/global/variables.go
@@ -3,14 +3,15 @@ package global
 type MountPath string
 
 const (
-	OVA                  = "ova"
-	VSPHERE              = "vSphere"
-	DIR                  = "/var/tmp/v2v"
-	INSPECTION           = "/var/tmp/v2v/inspection.xml"
-	FS         MountPath = "/mnt/disks/disk[0-9]*"
-	BLOCK      MountPath = "/dev/block[0-9]*"
-	VDDK                 = "/opt/vmware-vix-disklib-distrib"
-	LUKSDIR              = "/etc/luks"
+	OVA                      = "ova"
+	VSPHERE                  = "vSphere"
+	DIR                      = "/var/tmp/v2v"
+	INSPECTION               = "/var/tmp/v2v/inspection.xml"
+	FS             MountPath = "/mnt/disks/disk[0-9]*"
+	BLOCK          MountPath = "/dev/block[0-9]*"
+	VDDK_LIB                 = "/opt/vmware-vix-disklib-distrib"
+	LUKSDIR                  = "/etc/luks"
+	VDDK_CONF_FILE           = "/mnt/vddk-conf/vddk-config-file"
 
 	WIN_FIRSTBOOT_PATH         = "/Program Files/Guestfs/Firstboot"
 	WIN_FIRSTBOOT_SCRIPTS_PATH = "/Program Files/Guestfs/Firstboot/scripts"
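End to end, the conversion pod mounts the `<plan>-vddk-conf` ConfigMap at /mnt/vddk-conf, so its `vddk-config-file` key surfaces at VDDK_CONF_FILE, and the entrypoint forwards it to virt-v2v. A sketch (not from the source; the thumbprint value is a placeholder) of the resulting argument list:

package main

import "fmt"

func main() {
	const vddkLib = "/opt/vmware-vix-disklib-distrib"  // global.VDDK_LIB
	const vddkConf = "/mnt/vddk-conf/vddk-config-file" // global.VDDK_CONF_FILE
	args := []string{
		"-it", "vddk",
		"-io", fmt.Sprintf("vddk-libdir=%s", vddkLib),
		"-io", "vddk-thumbprint=<V2V_fingerprint>", // read from the V2V_fingerprint env var
		"-io", fmt.Sprintf("vddk-config=%s", vddkConf),
	}
	fmt.Println(args)
}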