From 419605d965b4580248f236d6dd2158862ac52d45 Mon Sep 17 00:00:00 2001
From: Martin Necas
Date: Wed, 16 Oct 2024 13:37:41 +0200
Subject: [PATCH] MTV-1536 | Use CDI for disk transfer in cold migration

Issues:
[1] Allow migration of "unknown" guests
Right now we cannot migrate guests whose operating system is unknown
to, or unsupported by, virt-v2v [3].
[2] Unify the process and potentially speed it up
Right now we are using two different methods for the disk transfer.
This brings additional engineering effort to maintain two paths, and
two different flows are harder to debug. virt-v2v transfers the disks
sequentially, whereas with CDI we can start multiple disk imports in
parallel, which can improve migration speed.

Fix:
MTV already uses CNV CDI for warm and remote migrations. We just need
to adjust the code to remove the virt-v2v transfer and rely on CNV CDI
to do it for us (see the DataVolume source sketch appended after the
diff).

Drawbacks:
- CNV CDI *requires* the VDDK, which until now was only highly
  recommended.
- CNV CDI is not maintained inside MTV, so escalating and backporting
  patches might be problematic, as CNV has a different release cycle.
- Because we will be migrating all disks in parallel, we need to
  optimise our migration scheduler so that we don't take up too many
  host/network resources. I have already done some optimisations in
  [4,5,6].

Notes:
This change removes the usage of virt-v2v; only virt-v2v-in-place will
be used from now on.

Ref:
[1] https://issues.redhat.com/browse/MTV-1536
[2] https://issues.redhat.com/browse/MTV-1581
[3] https://access.redhat.com/articles/1351473
[4] https://github.com/kubev2v/forklift/pull/1088
[5] https://github.com/kubev2v/forklift/pull/1087
[6] https://github.com/kubev2v/forklift/pull/1086

Signed-off-by: Martin Necas
---
 cmd/virt-v2v-monitor/BUILD.bazel              |  19 ---
 cmd/virt-v2v-monitor/virt-v2v-monitor.go      | 127 ------------------
 pkg/apis/forklift/v1beta1/BUILD.bazel         |   1 -
 pkg/apis/forklift/v1beta1/plan.go             |  25 ----
 .../plan/adapter/vsphere/builder.go           |  32 ++---
 pkg/controller/plan/adapter/vsphere/client.go |   7 -
 pkg/controller/plan/kubevirt.go               |  52 +------
 pkg/controller/plan/migration.go              | 109 +--------------
 .../plan/scheduler/vsphere/scheduler.go       |  30 ++---
 .../admitters/plan-admitter.go                |  11 --
 .../forklift-controller/migration_metrics.go  |   2 +-
 virt-v2v/BUILD.bazel                          |   1 -
 virt-v2v/cmd/entrypoint.go                    |  67 +--------
 13 files changed, 30 insertions(+), 453 deletions(-)
 delete mode 100644 cmd/virt-v2v-monitor/BUILD.bazel
 delete mode 100644 cmd/virt-v2v-monitor/virt-v2v-monitor.go

diff --git a/cmd/virt-v2v-monitor/BUILD.bazel b/cmd/virt-v2v-monitor/BUILD.bazel
deleted file mode 100644
index f5ab459f1..000000000
--- a/cmd/virt-v2v-monitor/BUILD.bazel
+++ /dev/null
@@ -1,19 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
-
-go_binary(
-    name = "virt-v2v-monitor",
-    embed = [":virt-v2v-monitor_lib"],
-    visibility = ["//visibility:public"],
-)
-
-go_library(
-    name = "virt-v2v-monitor_lib",
-    srcs = ["virt-v2v-monitor.go"],
-    importpath = "github.com/konveyor/forklift-controller/cmd/virt-v2v-monitor",
-    visibility = ["//visibility:private"],
-    deps = [
-        "//vendor/github.com/prometheus/client_golang/prometheus",
-        "//vendor/github.com/prometheus/client_golang/prometheus/promhttp",
-        "//vendor/github.com/prometheus/client_model/go",
-    ],
-)
diff --git a/cmd/virt-v2v-monitor/virt-v2v-monitor.go b/cmd/virt-v2v-monitor/virt-v2v-monitor.go
deleted file mode 100644
index 4b82c1c01..000000000
--- a/cmd/virt-v2v-monitor/virt-v2v-monitor.go
+++ 
/dev/null @@ -1,127 +0,0 @@ -package main - -import ( - "bufio" - "fmt" - "net/http" - "os" - "regexp" - "strconv" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - dto "github.com/prometheus/client_model/go" -) - -var COPY_DISK_RE = regexp.MustCompile(`^.*Copying disk (\d+)/(\d+)`) -var DISK_PROGRESS_RE = regexp.MustCompile(`.+ (\d+)% \[[*-]+\]`) -var FINISHED_RE = regexp.MustCompile(`^\[[ .0-9]*\] Finishing off`) - -// Here is a scan function that imposes limit on returned line length. virt-v2v -// writes some overly long lines that don't fit into the internal buffer of -// Scanner. We could just provide bigger buffer, but it is hard to guess what -// size is large enough. Instead we just claim that line ends when it reaches -// buffer size. -func LimitedScanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { - advance, token, err = bufio.ScanLines(data, atEOF) - if token != nil || err != nil { - return - } - if len(data) == bufio.MaxScanTokenSize { - // Line is too long for the buffer. Trim it. - advance = len(data) - token = data - } - return -} - -func updateProgress(progressCounter *prometheus.CounterVec, disk, progress uint64) (err error) { - if disk == 0 { - return - } - - label := strconv.FormatUint(disk, 10) - - var m = &dto.Metric{} - if err = progressCounter.WithLabelValues(label).Write(m); err != nil { - return - } - previous_progress := m.Counter.GetValue() - - change := float64(progress) - previous_progress - if change > 0 { - fmt.Printf("virt-v2v monitoring: Progress changed for disk %d about %v\n", disk, change) - progressCounter.WithLabelValues(label).Add(change) - } - return -} - -func main() { - // Start prometheus metrics HTTP handler - fmt.Println("virt-v2v monitoring: Setting up prometheus endpoint :2112/metrics") - http.Handle("/metrics", promhttp.Handler()) - go http.ListenAndServe(":2112", nil) - - progressCounter := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Subsystem: "v2v", - Name: "disk_transfers", - Help: "Percent of disk copied", - }, - []string{"disk_id"}, - ) - if err := prometheus.Register(progressCounter); err != nil { - // Exit gracefully if we fail here. We don't need monitoring - // failures to hinder guest conversion. - fmt.Println("virt-v2v monitoring: Prometheus progress counter not registered:", err) - return - } - fmt.Println("virt-v2v monitoring: Prometheus progress counter registered.") - - var diskNumber uint64 = 0 - var disks uint64 = 0 - var progress uint64 = 0 - - scanner := bufio.NewScanner(os.Stdin) - scanner.Split(LimitedScanLines) - for scanner.Scan() { - line := scanner.Bytes() - os.Stdout.Write(line) - os.Stdout.Write([]byte("\n")) - err := scanner.Err() - if err != nil { - fmt.Println("virt-v2v monitoring: Output monitoring failed! 
", err) - os.Exit(1) - } - - if match := COPY_DISK_RE.FindSubmatch(line); match != nil { - diskNumber, _ = strconv.ParseUint(string(match[1]), 10, 0) - disks, _ = strconv.ParseUint(string(match[2]), 10, 0) - fmt.Printf("virt-v2v monitoring: Copying disk %d out of %d\n", diskNumber, disks) - progress = 0 - err = updateProgress(progressCounter, diskNumber, progress) - } else if match := DISK_PROGRESS_RE.FindSubmatch(line); match != nil { - progress, _ = strconv.ParseUint(string(match[1]), 10, 0) - fmt.Printf("virt-v2v monitoring: Progress update, completed %d %%\n", progress) - err = updateProgress(progressCounter, diskNumber, progress) - } else if match := FINISHED_RE.Find(line); match != nil { - // Make sure we flag conversion as finished. This is - // just in case we miss the last progress update for some reason. - fmt.Println("virt-v2v monitoring: Finished") - for disk := uint64(0); disk < disks; disk++ { - err = updateProgress(progressCounter, disk, 100) - } - } - - if err != nil { - // Don't make processing errors fatal. - fmt.Println("virt-v2v monitoring: Error updating progress: ", err) - err = nil - } - } - err := scanner.Err() - if err != nil { - fmt.Println("virt-v2v monitoring: Output monitoring failed! ", err) - os.Exit(1) - } -} diff --git a/pkg/apis/forklift/v1beta1/BUILD.bazel b/pkg/apis/forklift/v1beta1/BUILD.bazel index 82b3bbad3..ea1f81a2f 100644 --- a/pkg/apis/forklift/v1beta1/BUILD.bazel +++ b/pkg/apis/forklift/v1beta1/BUILD.bazel @@ -23,7 +23,6 @@ go_library( "//pkg/apis/forklift/v1beta1/provider", "//pkg/apis/forklift/v1beta1/ref", "//pkg/lib/condition", - "//pkg/lib/error", "//vendor/k8s.io/api/core/v1:core", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:meta", "//vendor/k8s.io/apimachinery/pkg/runtime", diff --git a/pkg/apis/forklift/v1beta1/plan.go b/pkg/apis/forklift/v1beta1/plan.go index e9274c8bb..04e0cf7cf 100644 --- a/pkg/apis/forklift/v1beta1/plan.go +++ b/pkg/apis/forklift/v1beta1/plan.go @@ -21,7 +21,6 @@ import ( "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/provider" "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/ref" libcnd "github.com/konveyor/forklift-controller/pkg/lib/condition" - liberr "github.com/konveyor/forklift-controller/pkg/lib/error" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -93,30 +92,6 @@ type Plan struct { Referenced `json:"-"` } -// If the plan calls for the vm to be cold migrated to the local cluster, we can -// just use virt-v2v directly to convert the vm while copying data over. In other -// cases, we use CDI to transfer disks to the destination cluster and then use -// virt-v2v-in-place to convert these disks after cutover. 
-func (p *Plan) VSphereColdLocal() (bool, error) { - source := p.Referenced.Provider.Source - if source == nil { - return false, liberr.New("Cannot analyze plan, source provider is missing.") - } - destination := p.Referenced.Provider.Destination - if destination == nil { - return false, liberr.New("Cannot analyze plan, destination provider is missing.") - } - - switch source.Type() { - case VSphere: - return !p.Spec.Warm && destination.IsHost(), nil - case Ova: - return true, nil - default: - return false, nil - } -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type PlanList struct { meta.TypeMeta `json:",inline"` diff --git a/pkg/controller/plan/adapter/vsphere/builder.go b/pkg/controller/plan/adapter/vsphere/builder.go index 86dffca4d..968fa7ff7 100644 --- a/pkg/controller/plan/adapter/vsphere/builder.go +++ b/pkg/controller/plan/adapter/vsphere/builder.go @@ -434,28 +434,16 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config if disk.Datastore.ID == ds.ID { storageClass := mapped.Destination.StorageClass var dvSource cdi.DataVolumeSource - coldLocal, vErr := r.Context.Plan.VSphereColdLocal() - if vErr != nil { - err = vErr - return - } - if coldLocal { - // Let virt-v2v do the copying - dvSource = cdi.DataVolumeSource{ - Blank: &cdi.DataVolumeBlankImage{}, - } - } else { - // Let CDI do the copying - dvSource = cdi.DataVolumeSource{ - VDDK: &cdi.DataVolumeSourceVDDK{ - BackingFile: r.baseVolume(disk.File), - UUID: vm.UUID, - URL: url, - SecretRef: secret.Name, - Thumbprint: thumbprint, - InitImageURL: r.Source.Provider.Spec.Settings[api.VDDK], - }, - } + // Let CDI do the copying + dvSource = cdi.DataVolumeSource{ + VDDK: &cdi.DataVolumeSourceVDDK{ + BackingFile: r.baseVolume(disk.File), + UUID: vm.UUID, + URL: url, + SecretRef: secret.Name, + Thumbprint: thumbprint, + InitImageURL: r.Source.Provider.Spec.Settings[api.VDDK], + }, } dvSpec := cdi.DataVolumeSpec{ Source: &dvSource, diff --git a/pkg/controller/plan/adapter/vsphere/client.go b/pkg/controller/plan/adapter/vsphere/client.go index 41fc0b7c3..785da3777 100644 --- a/pkg/controller/plan/adapter/vsphere/client.go +++ b/pkg/controller/plan/adapter/vsphere/client.go @@ -277,13 +277,6 @@ func (r *Client) getChangeIds(vmRef ref.Ref, snapshotId string, hosts util.Hosts } func (r *Client) getClient(vm *model.VM, hosts util.HostsFunc) (client *vim25.Client, err error) { - if coldLocal, vErr := r.Plan.VSphereColdLocal(); vErr == nil && coldLocal { - // when virt-v2v runs the migration, forklift-controller should interact only - // with the component that serves the SDK endpoint of the provider - client = r.client.Client - return - } - if r.Source.Provider.Spec.Settings[v1beta1.SDK] == v1beta1.ESXI { // when migrating from ESXi host, we use the client of the SDK endpoint of the provider, // there's no need in a different client (the ESXi host is the only component involved in the migration) diff --git a/pkg/controller/plan/kubevirt.go b/pkg/controller/plan/kubevirt.go index af44af2a1..181c71beb 100644 --- a/pkg/controller/plan/kubevirt.go +++ b/pkg/controller/plan/kubevirt.go @@ -824,18 +824,12 @@ func (r *KubeVirt) getListOptionsNamespaced() (listOptions *client.ListOptions) // Ensure the guest conversion (virt-v2v) pod exists on the destination. 
func (r *KubeVirt) EnsureGuestConversionPod(vm *plan.VMStatus, vmCr *VirtualMachine, pvcs []*core.PersistentVolumeClaim) (err error) { - labels := r.vmLabels(vm.Ref) - v2vSecret, err := r.ensureSecret(vm.Ref, r.secretDataSetterForCDI(vm.Ref), labels) - if err != nil { - return - } - configMap, err := r.ensureLibvirtConfigMap(vm.Ref, vmCr, pvcs) if err != nil { return } - newPod, err := r.guestConversionPod(vm, vmCr.Spec.Template.Spec.Volumes, configMap, pvcs, v2vSecret) + newPod, err := r.guestConversionPod(vm, vmCr.Spec.Template.Spec.Volumes, configMap, pvcs) if err != nil { return } @@ -1667,7 +1661,7 @@ func (r *KubeVirt) findTemplate(vm *plan.VMStatus) (tmpl *template.Template, err return } -func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, v2vSecret *core.Secret) (pod *core.Pod, err error) { +func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim) (pod *core.Pod, err error) { volumes, volumeMounts, volumeDevices, err := r.podVolumeMounts(vmVolumes, configMap, pvcs, vm) if err != nil { return @@ -1684,34 +1678,6 @@ func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, user := qemuUser nonRoot := true allowPrivilageEscalation := false - // virt-v2v image - coldLocal, vErr := r.Context.Plan.VSphereColdLocal() - if vErr != nil { - err = vErr - return - } - if coldLocal { - // mount the secret for the password and CA certificate - volumes = append(volumes, core.Volume{ - Name: "secret-volume", - VolumeSource: core.VolumeSource{ - Secret: &core.SecretVolumeSource{ - SecretName: v2vSecret.Name, - }, - }, - }) - volumeMounts = append(volumeMounts, core.VolumeMount{ - Name: "secret-volume", - ReadOnly: true, - MountPath: "/etc/secret", - }) - } else { - environment = append(environment, - core.EnvVar{ - Name: "V2V_inPlace", - Value: "1", - }) - } // VDDK image var initContainers []core.Container if vddkImage, found := r.Source.Provider.Spec.Settings[api.VDDK]; found { @@ -1805,18 +1771,8 @@ func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, InitContainers: initContainers, Containers: []core.Container{ { - Name: "virt-v2v", - Env: environment, - EnvFrom: []core.EnvFromSource{ - { - Prefix: "V2V_", - SecretRef: &core.SecretEnvSource{ - LocalObjectReference: core.LocalObjectReference{ - Name: v2vSecret.Name, - }, - }, - }, - }, + Name: "virt-v2v", + Env: environment, Image: Settings.Migration.VirtV2vImage, VolumeMounts: volumeMounts, VolumeDevices: volumeDevices, diff --git a/pkg/controller/plan/migration.go b/pkg/controller/plan/migration.go index f3c9e0530..c42082ccc 100644 --- a/pkg/controller/plan/migration.go +++ b/pkg/controller/plan/migration.go @@ -4,11 +4,7 @@ import ( "context" "errors" "fmt" - "io" - "net/http" "path" - "regexp" - "strconv" "strings" "time" @@ -46,8 +42,6 @@ var ( HasPreHook libitr.Flag = 0x01 HasPostHook libitr.Flag = 0x02 RequiresConversion libitr.Flag = 0x04 - CDIDiskCopy libitr.Flag = 0x08 - VirtV2vDiskCopy libitr.Flag = 0x10 OpenstackImageMigration libitr.Flag = 0x20 ) @@ -61,7 +55,6 @@ const ( CreateDataVolumes = "CreateDataVolumes" CreateVM = "CreateVM" CopyDisks = "CopyDisks" - AllocateDisks = "AllocateDisks" CopyingPaused = "CopyingPaused" AddCheckpoint = "AddCheckpoint" AddFinalCheckpoint = "AddFinalCheckpoint" @@ -71,7 +64,6 @@ const ( Finalize = "Finalize" CreateGuestConversionPod = "CreateGuestConversionPod" ConvertGuest 
= "ConvertGuest" - CopyDisksVirtV2V = "CopyDisksVirtV2V" PostHook = "PostHook" Completed = "Completed" WaitForSnapshot = "WaitForSnapshot" @@ -84,10 +76,8 @@ const ( const ( Initialize = "Initialize" Cutover = "Cutover" - DiskAllocation = "DiskAllocation" DiskTransfer = "DiskTransfer" ImageConversion = "ImageConversion" - DiskTransferV2v = "DiskTransferV2v" VMCreation = "VirtualMachineCreation" Unknown = "Unknown" ) @@ -107,11 +97,9 @@ var ( {Name: PowerOffSource}, {Name: WaitForPowerOff}, {Name: CreateDataVolumes}, - {Name: CopyDisks, All: CDIDiskCopy}, - {Name: AllocateDisks, All: VirtV2vDiskCopy}, + {Name: CopyDisks}, {Name: CreateGuestConversionPod, All: RequiresConversion}, {Name: ConvertGuest, All: RequiresConversion}, - {Name: CopyDisksVirtV2V, All: RequiresConversion}, {Name: ConvertOpenstackSnapshot, All: OpenstackImageMigration}, {Name: CreateVM}, {Name: PostHook, All: HasPostHook}, @@ -643,16 +631,12 @@ func (r *Migration) step(vm *plan.VMStatus) (step string) { switch vm.Phase { case Started, CreateInitialSnapshot, WaitForInitialSnapshot, CreateDataVolumes: step = Initialize - case AllocateDisks: - step = DiskAllocation case CopyDisks, CopyingPaused, CreateSnapshot, WaitForSnapshot, AddCheckpoint, ConvertOpenstackSnapshot: step = DiskTransfer case CreateFinalSnapshot, WaitForFinalSnapshot, AddFinalCheckpoint, Finalize: step = Cutover case CreateGuestConversionPod, ConvertGuest: step = ImageConversion - case CopyDisksVirtV2V: - step = DiskTransferV2v case CreateVM: step = VMCreation case PreHook, PostHook: @@ -882,7 +866,7 @@ func (r *Migration) execute(vm *plan.VMStatus) (err error) { step.MarkCompleted() step.Phase = Completed vm.Phase = r.next(vm.Phase) - case AllocateDisks, CopyDisks: + case CopyDisks: step, found := vm.FindStep(r.step(vm)) if !found { vm.AddError(fmt.Sprintf("Step '%s' not found", r.step(vm))) @@ -1127,7 +1111,7 @@ func (r *Migration) execute(vm *plan.VMStatus) (err error) { return } vm.Phase = r.next(vm.Phase) - case ConvertGuest, CopyDisksVirtV2V: + case ConvertGuest: step, found := vm.FindStep(r.step(vm)) if !found { vm.AddError(fmt.Sprintf("Step '%s' not found", r.step(vm))) @@ -1255,7 +1239,7 @@ func (r *Migration) buildPipeline(vm *plan.VM) (pipeline []*plan.Step, err error Phase: Pending, }, }) - case AllocateDisks, CopyDisks, CopyDisksVirtV2V, ConvertOpenstackSnapshot: + case CopyDisks, ConvertOpenstackSnapshot: tasks, pErr := r.builder.Tasks(vm.Ref) if pErr != nil { err = liberr.Wrap(pErr) @@ -1270,12 +1254,6 @@ func (r *Migration) buildPipeline(vm *plan.VM) (pipeline []*plan.Step, err error case CopyDisks: task_name = DiskTransfer task_description = "Transfer disks." - case AllocateDisks: - task_name = DiskAllocation - task_description = "Allocate disks." - case CopyDisksVirtV2V: - task_name = DiskTransferV2v - task_description = "Copy disks." case ConvertOpenstackSnapshot: task_name = ConvertOpenstackSnapshot task_description = "Convert OpenStack snapshot." @@ -1652,80 +1630,11 @@ func (r *Migration) updateConversionProgress(vm *plan.VMStatus, step *plan.Step) case core.PodFailed: step.MarkCompleted() step.AddError("Guest conversion failed. See pod logs for details.") - default: - if pod.Status.PodIP == "" { - // we get the progress from the pod and we cannot connect to the pod without PodIP - break - } - - coldLocal, err := r.Context.Plan.VSphereColdLocal() - switch { - case err != nil: - return liberr.Wrap(err) - case coldLocal: - if err := r.updateConversionProgressV2vMonitor(pod, step); err != nil { - // Just log it. 
Missing progress is not fatal. - log.Error(err, "Failed to update conversion progress") - } - } } return nil } -func (r *Migration) updateConversionProgressV2vMonitor(pod *core.Pod, step *plan.Step) (err error) { - var diskRegex = regexp.MustCompile(`v2v_disk_transfers\{disk_id="(\d+)"\} (\d{1,3}\.?\d*)`) - url := fmt.Sprintf("http://%s:2112/metrics", pod.Status.PodIP) - resp, err := http.Get(url) - switch { - case err == nil: - defer resp.Body.Close() - case strings.Contains(err.Error(), "connection refused"): - return nil - default: - return - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return - } - matches := diskRegex.FindAllStringSubmatch(string(body), -1) - if matches == nil { - return - } - someProgress := false - for _, match := range matches { - diskNumber, _ := strconv.ParseUint(string(match[1]), 10, 0) - progress, _ := strconv.ParseFloat(string(match[2]), 64) - r.Log.Info("Progress update", "disk", diskNumber, "progress", progress, "tasks", step.Tasks) - if progress > 100 { - r.Log.Info("Progress seems out of range", "progress", progress) - progress = 100 - } - - someProgress = someProgress || progress > 0 - if diskNumber > uint64(len(step.Tasks)) { - r.Log.Info("Ignoring progress update", "disk", diskNumber, "disks count", len(step.Tasks), "step", step.Name) - continue - } - task := step.Tasks[diskNumber-1] - if step.Name == DiskTransferV2v { - // Update copy progress if we're in CopyDisksVirtV2V step. - task.Progress.Completed = int64(float64(task.Progress.Total) * progress / 100) - } - } - step.ReflectTasks() - if step.Name == ImageConversion && someProgress && r.Source.Provider.Type() != v1beta1.Ova { - // Disk copying has already started. Transition from - // ConvertGuest to CopyDisksVirtV2V . - step.MarkCompleted() - step.Progress.Completed = step.Progress.Total - return - } - return -} - func (r *Migration) setDataVolumeCheckpoints(vm *plan.VMStatus) (err error) { disks, err := r.kubevirt.getDVs(vm) if err != nil { @@ -1853,12 +1762,6 @@ type Predicate struct { // Evaluate predicate flags. 
func (r *Predicate) Evaluate(flag libitr.Flag) (allowed bool, err error) { - coldLocal, vErr := r.context.Plan.VSphereColdLocal() - if vErr != nil { - err = vErr - return - } - switch flag { case HasPreHook: _, allowed = r.vm.FindHook(PreHook) @@ -1866,10 +1769,6 @@ func (r *Predicate) Evaluate(flag libitr.Flag) (allowed bool, err error) { _, allowed = r.vm.FindHook(PostHook) case RequiresConversion: allowed = r.context.Source.Provider.RequiresConversion() - case CDIDiskCopy: - allowed = !coldLocal - case VirtV2vDiskCopy: - allowed = coldLocal case OpenstackImageMigration: allowed = r.context.Plan.IsSourceProviderOpenstack() } diff --git a/pkg/controller/plan/scheduler/vsphere/scheduler.go b/pkg/controller/plan/scheduler/vsphere/scheduler.go index 82cb722b8..6be0f127d 100644 --- a/pkg/controller/plan/scheduler/vsphere/scheduler.go +++ b/pkg/controller/plan/scheduler/vsphere/scheduler.go @@ -197,27 +197,15 @@ func (r *Scheduler) buildPending() (err error) { } func (r *Scheduler) cost(vm *model.VM, vmStatus *plan.VMStatus) int { - coldLocal, _ := r.Plan.VSphereColdLocal() - if coldLocal { - switch vmStatus.Phase { - case CreateVM, PostHook, Completed: - // In these phases we already have the disk transferred and are left only to create the VM - // By setting the cost to 0 other VMs can start migrating - return 0 - default: - return 1 - } - } else { - switch vmStatus.Phase { - case CreateVM, PostHook, Completed, CopyingPaused, ConvertGuest, CreateGuestConversionPod: - // The warm/remote migrations this is done on already transferred disks, - // and we can start other VM migrations at these point. - // By setting the cost to 0 other VMs can start migrating - return 0 - default: - // CDI transfers the disks in parallel by different pods - return len(vm.Disks) - r.finishedDisks(vmStatus) - } + switch vmStatus.Phase { + case CreateVM, PostHook, Completed, CopyingPaused, ConvertGuest, CreateGuestConversionPod: + // The warm/remote migrations this is done on already transferred disks, + // and we can start other VM migrations at these point. 
+ // By setting the cost to 0 other VMs can start migrating + return 0 + default: + // CDI transfers the disks in parallel by different pods + return len(vm.Disks) - r.finishedDisks(vmStatus) } } diff --git a/pkg/forklift-api/webhooks/validating-webhook/admitters/plan-admitter.go b/pkg/forklift-api/webhooks/validating-webhook/admitters/plan-admitter.go index afa3b4881..775e2a371 100644 --- a/pkg/forklift-api/webhooks/validating-webhook/admitters/plan-admitter.go +++ b/pkg/forklift-api/webhooks/validating-webhook/admitters/plan-admitter.go @@ -108,17 +108,6 @@ func (admitter *PlanAdmitter) validateLUKS() error { log.Error(err, "Provider type (non-VSphere & non-OVA) does not support LUKS") return err } - - coldLocal, vErr := admitter.plan.VSphereColdLocal() - if vErr != nil { - log.Error(vErr, "Could not analyze plan, failing") - return vErr - } - if !coldLocal { - err := liberr.New("migration of encrypted disks is not supported for warm migrations or migrations to remote providers") - log.Error(err, "Warm migration does not support LUKS") - return err - } return nil } diff --git a/pkg/monitoring/metrics/forklift-controller/migration_metrics.go b/pkg/monitoring/metrics/forklift-controller/migration_metrics.go index b30530d9e..4c4ed6adf 100644 --- a/pkg/monitoring/metrics/forklift-controller/migration_metrics.go +++ b/pkg/monitoring/metrics/forklift-controller/migration_metrics.go @@ -116,7 +116,7 @@ func recordSuccessfulMigrationMetrics(migration api.Migration, provider, mode, t var totalDataTransferred float64 for _, vm := range migration.Status.VMs { for _, step := range vm.Pipeline { - if step.Name == "DiskTransferV2v" || step.Name == "DiskTransfer" { + if step.Name == "DiskTransfer" { for _, task := range step.Tasks { totalDataTransferred += float64(task.Progress.Completed) * 1024 * 1024 // convert to Bytes } diff --git a/virt-v2v/BUILD.bazel b/virt-v2v/BUILD.bazel index c8e001e7b..b1b69a291 100644 --- a/virt-v2v/BUILD.bazel +++ b/virt-v2v/BUILD.bazel @@ -102,7 +102,6 @@ container_image( files = [ "//cmd:virt-v2v-wrapper", "@forklift//cmd/image-converter", - "@forklift//cmd/virt-v2v-monitor", ], user = "1001", visibility = ["//visibility:public"], diff --git a/virt-v2v/cmd/entrypoint.go b/virt-v2v/cmd/entrypoint.go index ca9b34a58..834cf0b21 100644 --- a/virt-v2v/cmd/entrypoint.go +++ b/virt-v2v/cmd/entrypoint.go @@ -4,7 +4,6 @@ import ( _ "embed" "encoding/json" "fmt" - "io" "os" "os/exec" "strconv" @@ -23,12 +22,8 @@ func main() { os.Exit(1) } - // virt-v2v or virt-v2v-in-place - if _, found := os.LookupEnv("V2V_inPlace"); found { - err = runVirtV2vInPlace() - } else { - err = runVirtV2v() - } + // virt-v2v-in-place + err = runVirtV2vInPlace() if err != nil { fmt.Println("Failed to execute virt-v2v command:", err) os.Exit(1) @@ -196,67 +191,9 @@ func addCommonArgs(args []string) ([]string, error) { return args, nil } -func runVirtV2v() error { - args, err := virtV2vBuildCommand() - if err != nil { - return err - } - v2vCmd := exec.Command("virt-v2v", args...) - // The virt-v2v-monitor reads the virt-v2v stdout and processes it and exposes the progress of the migration. 
- monitorCmd := exec.Command("/usr/local/bin/virt-v2v-monitor") - monitorCmd.Stdout = os.Stdout - monitorCmd.Stderr = os.Stderr - - var writer *io.PipeWriter - monitorCmd.Stdin, writer = io.Pipe() - v2vCmd.Stdout = writer - v2vCmd.Stderr = writer - defer writer.Close() - - if err := monitorCmd.Start(); err != nil { - fmt.Printf("Error executing monitor command: %v\n", err) - return err - } - - fmt.Println("exec:", v2vCmd) - if err := v2vCmd.Run(); err != nil { - fmt.Printf("Error executing v2v command: %v\n", err) - return err - } - - // virt-v2v is done, we can close the pipe to virt-v2v-monitor - writer.Close() - - if err := monitorCmd.Wait(); err != nil { - fmt.Printf("Error waiting for virt-v2v-monitor to finish: %v\n", err) - return err - } - - return nil -} - // VirtV2VPrepEnvironment used in the cold migration. // It creates a links between the downloaded guest image from virt-v2v and mounted PVC. func virtV2VPrepEnvironment() (err error) { - source := os.Getenv("V2V_source") - _, inplace := os.LookupEnv("V2V_inPlace") - if source == global.VSPHERE && !inplace { - if _, err := os.Stat("/etc/secret/cacert"); err == nil { - // use the specified certificate - err = os.Symlink("/etc/secret/cacert", "/opt/ca-bundle.crt") - if err != nil { - fmt.Println("Error creating ca cert link ", err) - os.Exit(1) - } - } else { - // otherwise, keep system pool certificates - err := os.Symlink("/etc/pki/tls/certs/ca-bundle.crt.bak", "/opt/ca-bundle.crt") - if err != nil { - fmt.Println("Error creating ca cert link ", err) - os.Exit(1) - } - } - } if err = os.MkdirAll(global.DIR, os.ModePerm); err != nil { return fmt.Errorf("Error creating directory: %v", err) }
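For reference, here is a minimal sketch of the per-disk DataVolume source
that the vSphere builder now always produces once the virt-v2v blank-disk
path is gone. It assumes the CDI v1beta1 Go API vendored by forklift
(kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1); the
helper name and parameter values are illustrative and not code from this
patch.

package example

import (
	cdi "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

// vddkDiskSource builds the CDI source for one vSphere disk. With one
// DataVolume per disk, CDI imports run in parallel, which is why the
// scheduler cost is now the number of not-yet-finished disks.
func vddkDiskSource(backingFile, vmUUID, sdkURL, secretName, thumbprint, vddkImage string) cdi.DataVolumeSource {
	return cdi.DataVolumeSource{
		VDDK: &cdi.DataVolumeSourceVDDK{
			BackingFile:  backingFile, // e.g. "[datastore1] vm/vm.vmdk"
			UUID:         vmUUID,      // source VM UUID
			URL:          sdkURL,      // vCenter/ESXi SDK endpoint
			SecretRef:    secretName,  // secret with provider credentials
			Thumbprint:   thumbprint,  // SSL thumbprint of the endpoint
			InitImageURL: vddkImage,   // VDDK init image from provider settings
		},
	}
}

After CDI finishes the transfers, the guest conversion pod runs
virt-v2v-in-place against the already-populated PVCs, so no disk data is
copied by virt-v2v itself.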