diff --git a/pkg/apis/forklift/v1beta1/BUILD.bazel b/pkg/apis/forklift/v1beta1/BUILD.bazel
index 82b3bbad3..ea1f81a2f 100644
--- a/pkg/apis/forklift/v1beta1/BUILD.bazel
+++ b/pkg/apis/forklift/v1beta1/BUILD.bazel
@@ -23,7 +23,6 @@ go_library(
         "//pkg/apis/forklift/v1beta1/provider",
         "//pkg/apis/forklift/v1beta1/ref",
         "//pkg/lib/condition",
-        "//pkg/lib/error",
         "//vendor/k8s.io/api/core/v1:core",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:meta",
         "//vendor/k8s.io/apimachinery/pkg/runtime",
diff --git a/pkg/apis/forklift/v1beta1/plan.go b/pkg/apis/forklift/v1beta1/plan.go
index e9274c8bb..ba6714bbf 100644
--- a/pkg/apis/forklift/v1beta1/plan.go
+++ b/pkg/apis/forklift/v1beta1/plan.go
@@ -21,7 +21,6 @@ import (
 	"github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/provider"
 	"github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/ref"
 	libcnd "github.com/konveyor/forklift-controller/pkg/lib/condition"
-	liberr "github.com/konveyor/forklift-controller/pkg/lib/error"
 	core "k8s.io/api/core/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -93,30 +92,6 @@ type Plan struct {
 	Referenced `json:"-"`
 }
 
-// If the plan calls for the vm to be cold migrated to the local cluster, we can
-// just use virt-v2v directly to convert the vm while copying data over. In other
-// cases, we use CDI to transfer disks to the destination cluster and then use
-// virt-v2v-in-place to convert these disks after cutover.
-func (p *Plan) VSphereColdLocal() (bool, error) {
-	source := p.Referenced.Provider.Source
-	if source == nil {
-		return false, liberr.New("Cannot analyze plan, source provider is missing.")
-	}
-	destination := p.Referenced.Provider.Destination
-	if destination == nil {
-		return false, liberr.New("Cannot analyze plan, destination provider is missing.")
-	}
-
-	switch source.Type() {
-	case VSphere:
-		return !p.Spec.Warm && destination.IsHost(), nil
-	case Ova:
-		return true, nil
-	default:
-		return false, nil
-	}
-}
-
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 type PlanList struct {
 	meta.TypeMeta `json:",inline"`
@@ -139,3 +114,7 @@ func (r *Plan) IsSourceProviderOvirt() bool {
 func (r *Plan) IsSourceProviderOCP() bool {
 	return r.Provider.Source.Type() == OpenShift
 }
+
+func (r *Plan) IsSourceProviderOVA() bool {
+	return r.Provider.Source.Type() == Ova
+}
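With VSphereColdLocal removed, vSphere disks always go through CDI and only OVA plans keep the direct virt-v2v path, so the decision no longer has error cases. A minimal sketch of the new call-site shape, assuming a *Plan whose Referenced providers are resolved (illustrative helper, not part of this change):

package example

import api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1"

// chooseCopyPath is a hypothetical helper showing the simplified decision:
// the old VSphereColdLocal call returned (bool, error) and failed when a
// provider reference was unresolved; the new check depends on the source
// provider type alone and cannot fail.
func chooseCopyPath(plan *api.Plan) string {
	if plan.IsSourceProviderOVA() {
		return "virt-v2v copies and converts the appliance disks"
	}
	return "CDI copies the disks; virt-v2v-in-place converts them after transfer"
}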
diff --git a/pkg/controller/plan/adapter/vsphere/builder.go b/pkg/controller/plan/adapter/vsphere/builder.go
index 86dffca4d..968fa7ff7 100644
--- a/pkg/controller/plan/adapter/vsphere/builder.go
+++ b/pkg/controller/plan/adapter/vsphere/builder.go
@@ -434,28 +434,16 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config
 			if disk.Datastore.ID == ds.ID {
 				storageClass := mapped.Destination.StorageClass
 				var dvSource cdi.DataVolumeSource
-				coldLocal, vErr := r.Context.Plan.VSphereColdLocal()
-				if vErr != nil {
-					err = vErr
-					return
-				}
-				if coldLocal {
-					// Let virt-v2v do the copying
-					dvSource = cdi.DataVolumeSource{
-						Blank: &cdi.DataVolumeBlankImage{},
-					}
-				} else {
-					// Let CDI do the copying
-					dvSource = cdi.DataVolumeSource{
-						VDDK: &cdi.DataVolumeSourceVDDK{
-							BackingFile:  r.baseVolume(disk.File),
-							UUID:         vm.UUID,
-							URL:          url,
-							SecretRef:    secret.Name,
-							Thumbprint:   thumbprint,
-							InitImageURL: r.Source.Provider.Spec.Settings[api.VDDK],
-						},
-					}
+				// Let CDI do the copying
+				dvSource = cdi.DataVolumeSource{
+					VDDK: &cdi.DataVolumeSourceVDDK{
+						BackingFile:  r.baseVolume(disk.File),
+						UUID:         vm.UUID,
+						URL:          url,
+						SecretRef:    secret.Name,
+						Thumbprint:   thumbprint,
+						InitImageURL: r.Source.Provider.Spec.Settings[api.VDDK],
+					},
 				}
 				dvSpec := cdi.DataVolumeSpec{
 					Source:       &dvSource,
diff --git a/pkg/controller/plan/adapter/vsphere/client.go b/pkg/controller/plan/adapter/vsphere/client.go
index 41fc0b7c3..785da3777 100644
--- a/pkg/controller/plan/adapter/vsphere/client.go
+++ b/pkg/controller/plan/adapter/vsphere/client.go
@@ -277,13 +277,6 @@ func (r *Client) getChangeIds(vmRef ref.Ref, snapshotId string, hosts util.Hosts
 }
 
 func (r *Client) getClient(vm *model.VM, hosts util.HostsFunc) (client *vim25.Client, err error) {
-	if coldLocal, vErr := r.Plan.VSphereColdLocal(); vErr == nil && coldLocal {
-		// when virt-v2v runs the migration, forklift-controller should interact only
-		// with the component that serves the SDK endpoint of the provider
-		client = r.client.Client
-		return
-	}
-
 	if r.Source.Provider.Spec.Settings[v1beta1.SDK] == v1beta1.ESXI {
 		// when migrating from ESXi host, we use the client of the SDK endpoint of the provider,
 		// there's no need in a different client (the ESXi host is the only component involved in the migration)
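For reference, a standalone sketch of the DataVolume the builder now emits for every vSphere disk, assuming CDI's v1beta1 API types; every field value is a placeholder:

package example

import (
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	cdi "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

// vddkDataVolume mirrors the only source the builder produces after this
// change: CDI pulls the disk over VDDK, and virt-v2v-in-place converts it
// after the transfer completes.
func vddkDataVolume(storageClass string) cdi.DataVolume {
	return cdi.DataVolume{
		ObjectMeta: meta.ObjectMeta{GenerateName: "example-dv-"},
		Spec: cdi.DataVolumeSpec{
			Source: &cdi.DataVolumeSource{
				VDDK: &cdi.DataVolumeSourceVDDK{
					BackingFile:  "[datastore1] example-vm/example-vm.vmdk",
					UUID:         "4210aa11-example",
					URL:          "https://vcenter.example.com/sdk",
					SecretRef:    "example-vddk-secret",
					Thumbprint:   "AA:BB:CC",
					InitImageURL: "registry.example.com/vddk:latest",
				},
			},
			Storage: &cdi.StorageSpec{StorageClassName: &storageClass},
		},
	}
}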
diff --git a/pkg/controller/plan/kubevirt.go b/pkg/controller/plan/kubevirt.go
index af44af2a1..181c71beb 100644
--- a/pkg/controller/plan/kubevirt.go
+++ b/pkg/controller/plan/kubevirt.go
@@ -824,18 +824,12 @@ func (r *KubeVirt) getListOptionsNamespaced() (listOptions *client.ListOptions)
 
 // Ensure the guest conversion (virt-v2v) pod exists on the destination.
 func (r *KubeVirt) EnsureGuestConversionPod(vm *plan.VMStatus, vmCr *VirtualMachine, pvcs []*core.PersistentVolumeClaim) (err error) {
-	labels := r.vmLabels(vm.Ref)
-	v2vSecret, err := r.ensureSecret(vm.Ref, r.secretDataSetterForCDI(vm.Ref), labels)
-	if err != nil {
-		return
-	}
-
 	configMap, err := r.ensureLibvirtConfigMap(vm.Ref, vmCr, pvcs)
 	if err != nil {
 		return
 	}
 
-	newPod, err := r.guestConversionPod(vm, vmCr.Spec.Template.Spec.Volumes, configMap, pvcs, v2vSecret)
+	newPod, err := r.guestConversionPod(vm, vmCr.Spec.Template.Spec.Volumes, configMap, pvcs)
 	if err != nil {
 		return
 	}
@@ -1667,7 +1661,7 @@ func (r *KubeVirt) findTemplate(vm *plan.VMStatus) (tmpl *template.Template, err
 	return
 }
 
-func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, v2vSecret *core.Secret) (pod *core.Pod, err error) {
+func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim) (pod *core.Pod, err error) {
 	volumes, volumeMounts, volumeDevices, err := r.podVolumeMounts(vmVolumes, configMap, pvcs, vm)
 	if err != nil {
 		return
@@ -1684,34 +1678,6 @@ func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume,
 	user := qemuUser
 	nonRoot := true
 	allowPrivilageEscalation := false
-	// virt-v2v image
-	coldLocal, vErr := r.Context.Plan.VSphereColdLocal()
-	if vErr != nil {
-		err = vErr
-		return
-	}
-	if coldLocal {
-		// mount the secret for the password and CA certificate
-		volumes = append(volumes, core.Volume{
-			Name: "secret-volume",
-			VolumeSource: core.VolumeSource{
-				Secret: &core.SecretVolumeSource{
-					SecretName: v2vSecret.Name,
-				},
-			},
-		})
-		volumeMounts = append(volumeMounts, core.VolumeMount{
-			Name:      "secret-volume",
-			ReadOnly:  true,
-			MountPath: "/etc/secret",
-		})
-	} else {
-		environment = append(environment,
-			core.EnvVar{
-				Name:  "V2V_inPlace",
-				Value: "1",
-			})
-	}
 	// VDDK image
 	var initContainers []core.Container
 	if vddkImage, found := r.Source.Provider.Spec.Settings[api.VDDK]; found {
@@ -1805,18 +1771,8 @@ func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume,
 		InitContainers: initContainers,
 		Containers: []core.Container{
 			{
-				Name: "virt-v2v",
-				Env:  environment,
-				EnvFrom: []core.EnvFromSource{
-					{
-						Prefix: "V2V_",
-						SecretRef: &core.SecretEnvSource{
-							LocalObjectReference: core.LocalObjectReference{
-								Name: v2vSecret.Name,
-							},
-						},
-					},
-				},
+				Name:          "virt-v2v",
+				Env:           environment,
 				Image:         Settings.Migration.VirtV2vImage,
 				VolumeMounts:  volumeMounts,
 				VolumeDevices: volumeDevices,
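The conversion pod now carries no provider credentials: the secret volume, the V2V_* EnvFrom injection, and the V2V_inPlace toggle are all gone, and the container is driven entirely by the plan-derived environment. A sketch of the resulting container shape (field values illustrative; Settings.Migration.VirtV2vImage supplies the real image in the controller):

package example

import core "k8s.io/api/core/v1"

// conversionContainer shows the simplified virt-v2v container: no EnvFrom
// secret injection and no secret volume mount remain after this change.
func conversionContainer(env []core.EnvVar, mounts []core.VolumeMount, devices []core.VolumeDevice) core.Container {
	return core.Container{
		Name:          "virt-v2v",
		Image:         "quay.io/example/virt-v2v:latest", // placeholder image
		Env:           env,                               // plan-derived V2V_* variables
		VolumeMounts:  mounts,                            // libvirt config map, conversion PVCs
		VolumeDevices: devices,                           // block-mode disks
	}
}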
diff --git a/pkg/controller/plan/migration.go b/pkg/controller/plan/migration.go
index f3c9e0530..b34e5dfea 100644
--- a/pkg/controller/plan/migration.go
+++ b/pkg/controller/plan/migration.go
@@ -47,7 +47,7 @@ var (
 	HasPostHook             libitr.Flag = 0x02
 	RequiresConversion      libitr.Flag = 0x04
 	CDIDiskCopy             libitr.Flag = 0x08
-	VirtV2vDiskCopy         libitr.Flag = 0x10
+	OvaImageMigration       libitr.Flag = 0x10
 	OpenstackImageMigration libitr.Flag = 0x20
 )
 
@@ -61,7 +61,6 @@ const (
 	CreateDataVolumes  = "CreateDataVolumes"
 	CreateVM           = "CreateVM"
 	CopyDisks          = "CopyDisks"
-	AllocateDisks      = "AllocateDisks"
 	CopyingPaused      = "CopyingPaused"
 	AddCheckpoint      = "AddCheckpoint"
 	AddFinalCheckpoint = "AddFinalCheckpoint"
@@ -84,7 +83,6 @@ const (
 const (
 	Initialize      = "Initialize"
 	Cutover         = "Cutover"
-	DiskAllocation  = "DiskAllocation"
 	DiskTransfer    = "DiskTransfer"
 	ImageConversion = "ImageConversion"
 	DiskTransferV2v = "DiskTransferV2v"
@@ -108,10 +106,9 @@ var (
 			{Name: WaitForPowerOff},
 			{Name: CreateDataVolumes},
 			{Name: CopyDisks, All: CDIDiskCopy},
-			{Name: AllocateDisks, All: VirtV2vDiskCopy},
 			{Name: CreateGuestConversionPod, All: RequiresConversion},
 			{Name: ConvertGuest, All: RequiresConversion},
-			{Name: CopyDisksVirtV2V, All: RequiresConversion},
+			{Name: CopyDisksVirtV2V, All: OvaImageMigration},
 			{Name: ConvertOpenstackSnapshot, All: OpenstackImageMigration},
 			{Name: CreateVM},
 			{Name: PostHook, All: HasPostHook},
@@ -643,8 +640,6 @@ func (r *Migration) step(vm *plan.VMStatus) (step string) {
 	switch vm.Phase {
 	case Started, CreateInitialSnapshot, WaitForInitialSnapshot, CreateDataVolumes:
 		step = Initialize
-	case AllocateDisks:
-		step = DiskAllocation
 	case CopyDisks, CopyingPaused, CreateSnapshot, WaitForSnapshot, AddCheckpoint, ConvertOpenstackSnapshot:
 		step = DiskTransfer
 	case CreateFinalSnapshot, WaitForFinalSnapshot, AddFinalCheckpoint, Finalize:
@@ -882,7 +877,7 @@ func (r *Migration) execute(vm *plan.VMStatus) (err error) {
 		step.MarkCompleted()
 		step.Phase = Completed
 		vm.Phase = r.next(vm.Phase)
-	case AllocateDisks, CopyDisks:
+	case CopyDisks:
 		step, found := vm.FindStep(r.step(vm))
 		if !found {
 			vm.AddError(fmt.Sprintf("Step '%s' not found", r.step(vm)))
@@ -1255,7 +1250,7 @@ func (r *Migration) buildPipeline(vm *plan.VM) (pipeline []*plan.Step, err error
 				Phase:       Pending,
 			},
 		})
-	case AllocateDisks, CopyDisks, CopyDisksVirtV2V, ConvertOpenstackSnapshot:
+	case CopyDisks, CopyDisksVirtV2V, ConvertOpenstackSnapshot:
 		tasks, pErr := r.builder.Tasks(vm.Ref)
 		if pErr != nil {
 			err = liberr.Wrap(pErr)
@@ -1270,9 +1265,6 @@ func (r *Migration) buildPipeline(vm *plan.VM) (pipeline []*plan.Step, err error
 		case CopyDisks:
 			task_name = DiskTransfer
 			task_description = "Transfer disks."
-		case AllocateDisks:
-			task_name = DiskAllocation
-			task_description = "Allocate disks."
 		case CopyDisksVirtV2V:
 			task_name = DiskTransferV2v
 			task_description = "Copy disks."
@@ -1658,11 +1650,7 @@ func (r *Migration) updateConversionProgress(vm *plan.VMStatus, step *plan.Step)
 			break
 		}
 
-		coldLocal, err := r.Context.Plan.VSphereColdLocal()
-		switch {
-		case err != nil:
-			return liberr.Wrap(err)
-		case coldLocal:
+		if r.Context.Plan.IsSourceProviderOVA() {
 			if err := r.updateConversionProgressV2vMonitor(pod, step); err != nil {
 				// Just log it. Missing progress is not fatal.
 				log.Error(err, "Failed to update conversion progress")
@@ -1853,12 +1841,6 @@ type Predicate struct {
 
 // Evaluate predicate flags.
 func (r *Predicate) Evaluate(flag libitr.Flag) (allowed bool, err error) {
-	coldLocal, vErr := r.context.Plan.VSphereColdLocal()
-	if vErr != nil {
-		err = vErr
-		return
-	}
-
 	switch flag {
 	case HasPreHook:
 		_, allowed = r.vm.FindHook(PreHook)
@@ -1866,10 +1848,10 @@ func (r *Predicate) Evaluate(flag libitr.Flag) (allowed bool, err error) {
 		_, allowed = r.vm.FindHook(PostHook)
 	case RequiresConversion:
 		allowed = r.context.Source.Provider.RequiresConversion()
-	case CDIDiskCopy:
-		allowed = !coldLocal
-	case VirtV2vDiskCopy:
-		allowed = coldLocal
+	case CDIDiskCopy:
+		allowed = !r.context.Plan.IsSourceProviderOVA()
+	case OvaImageMigration:
+		allowed = r.context.Plan.IsSourceProviderOVA()
 	case OpenstackImageMigration:
 		allowed = r.context.Plan.IsSourceProviderOpenstack()
 	}
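The itinerary machinery includes a pipeline step only when the step's flags evaluate true against the plan, so renaming VirtV2vDiskCopy to OvaImageMigration is what routes OVA plans to CopyDisksVirtV2V and everything else to the CDI CopyDisks step. A toy model of that gating — not the libitr API, just its selection semantics:

package main

import "fmt"

// Flag stands in for libitr.Flag: each pipeline step names the flags that
// must evaluate true for the step to appear in the itinerary.
type Flag uint

const (
	CDIDiskCopy Flag = 1 << iota
	OvaImageMigration
)

// include mimics the predicate gating: a step with All == 0 always runs.
func include(all Flag, isOVA bool) bool {
	switch all {
	case 0:
		return true
	case CDIDiskCopy:
		return !isOVA
	case OvaImageMigration:
		return isOVA
	}
	return false
}

func main() {
	for _, isOVA := range []bool{false, true} {
		fmt.Printf("OVA=%v CopyDisks=%v CopyDisksVirtV2V=%v\n",
			isOVA, include(CDIDiskCopy, isOVA), include(OvaImageMigration, isOVA))
	}
}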
diff --git a/pkg/controller/plan/scheduler/vsphere/scheduler.go b/pkg/controller/plan/scheduler/vsphere/scheduler.go
index 82cb722b8..6be0f127d 100644
--- a/pkg/controller/plan/scheduler/vsphere/scheduler.go
+++ b/pkg/controller/plan/scheduler/vsphere/scheduler.go
@@ -197,27 +197,15 @@ func (r *Scheduler) buildPending() (err error) {
 }
 
 func (r *Scheduler) cost(vm *model.VM, vmStatus *plan.VMStatus) int {
-	coldLocal, _ := r.Plan.VSphereColdLocal()
-	if coldLocal {
-		switch vmStatus.Phase {
-		case CreateVM, PostHook, Completed:
-			// In these phases we already have the disk transferred and are left only to create the VM
-			// By setting the cost to 0 other VMs can start migrating
-			return 0
-		default:
-			return 1
-		}
-	} else {
-		switch vmStatus.Phase {
-		case CreateVM, PostHook, Completed, CopyingPaused, ConvertGuest, CreateGuestConversionPod:
-			// The warm/remote migrations this is done on already transferred disks,
-			// and we can start other VM migrations at these point.
-			// By setting the cost to 0 other VMs can start migrating
-			return 0
-		default:
-			// CDI transfers the disks in parallel by different pods
-			return len(vm.Disks) - r.finishedDisks(vmStatus)
-		}
+	switch vmStatus.Phase {
+	case CreateVM, PostHook, Completed, CopyingPaused, ConvertGuest, CreateGuestConversionPod:
+		// In these phases the disks have already been transferred, so other
+		// VM migrations can start at this point.
+		// By setting the cost to 0 other VMs can start migrating.
+		return 0
+	default:
+		// CDI transfers the disks in parallel by different pods
+		return len(vm.Disks) - r.finishedDisks(vmStatus)
 	}
 }
diff --git a/pkg/forklift-api/webhooks/validating-webhook/admitters/plan-admitter.go b/pkg/forklift-api/webhooks/validating-webhook/admitters/plan-admitter.go
index afa3b4881..775e2a371 100644
--- a/pkg/forklift-api/webhooks/validating-webhook/admitters/plan-admitter.go
+++ b/pkg/forklift-api/webhooks/validating-webhook/admitters/plan-admitter.go
@@ -108,17 +108,6 @@ func (admitter *PlanAdmitter) validateLUKS() error {
 		log.Error(err, "Provider type (non-VSphere & non-OVA) does not support LUKS")
 		return err
 	}
-
-	coldLocal, vErr := admitter.plan.VSphereColdLocal()
-	if vErr != nil {
-		log.Error(vErr, "Could not analyze plan, failing")
-		return vErr
-	}
-	if !coldLocal {
-		err := liberr.New("migration of encrypted disks is not supported for warm migrations or migrations to remote providers")
-		log.Error(err, "Warm migration does not support LUKS")
-		return err
-	}
 	return nil
 }
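The scheduler's cost function above is what admits new migrations: a VM contributes its count of unfinished disks while copying and drops to zero once its disks are transferred. A toy model of that accounting (the budget name is an assumption, not the real setting):

package main

import "fmt"

// vmState is a stand-in for the scheduler's per-VM view: total disks and
// how many have finished transferring.
type vmState struct {
	name          string
	disks         int
	finishedDisks int
}

// cost mirrors the default branch above: unfinished disks each hold a slot.
func cost(vm vmState) int { return vm.disks - vm.finishedDisks }

func main() {
	budget := 20 // assumed concurrency limit, analogous to a max-in-flight setting
	running := []vmState{
		{"vm-1", 3, 3}, // fully transferred: cost 0 frees its capacity
		{"vm-2", 5, 1},
	}
	used := 0
	for _, vm := range running {
		used += cost(vm)
	}
	fmt.Printf("used %d of %d; can admit another VM: %v\n", used, budget, used < budget)
}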
Provided: %s\n", strings.Join(providers, ", "), source) } - args = append(args, "-o", "kubevirt", "-os", global.DIR) - - switch source { - case global.VSPHERE: - vsphereArgs, err := virtV2vVsphereArgs() - if err != nil { - return nil, err - } - args = append(args, vsphereArgs...) - case global.OVA: - args = append(args, "-i", "ova", os.Getenv("V2V_diskPath")) - } + args = append(args, "-o", "kubevirt", "-os", global.DIR, "-i", "ova", os.Getenv("V2V_diskPath")) return args, nil } -func virtV2vVsphereArgs() (args []string, err error) { - args = append(args, "-i", "libvirt", "-ic", os.Getenv("V2V_libvirtURL")) - args = append(args, "-ip", "/etc/secret/secretKey") - args, err = addCommonArgs(args) - if err != nil { - return nil, err - } - if info, err := os.Stat(global.VDDK); err == nil && info.IsDir() { - args = append(args, - "-it", "vddk", - "-io", fmt.Sprintf("vddk-libdir=%s", global.VDDK), - "-io", fmt.Sprintf("vddk-thumbprint=%s", os.Getenv("V2V_fingerprint")), - ) - } - - // When converting VM with name that do not meet DNS1123 RFC requirements, - // it should be changed to supported one to ensure the conversion does not fail. - if utils.CheckEnvVariablesSet("V2V_NewName") { - args = append(args, "-on", os.Getenv("V2V_NewName")) - } - - args = append(args, "--", os.Getenv("V2V_vmName")) - return args, nil -} - // addCommonArgs adds a v2v arguments which is used for both virt-v2v and virt-v2v-in-place func addCommonArgs(args []string) ([]string, error) { // Allow specifying which disk should be the bootable disk @@ -196,67 +198,9 @@ func addCommonArgs(args []string) ([]string, error) { return args, nil } -func runVirtV2v() error { - args, err := virtV2vBuildCommand() - if err != nil { - return err - } - v2vCmd := exec.Command("virt-v2v", args...) - // The virt-v2v-monitor reads the virt-v2v stdout and processes it and exposes the progress of the migration. - monitorCmd := exec.Command("/usr/local/bin/virt-v2v-monitor") - monitorCmd.Stdout = os.Stdout - monitorCmd.Stderr = os.Stderr - - var writer *io.PipeWriter - monitorCmd.Stdin, writer = io.Pipe() - v2vCmd.Stdout = writer - v2vCmd.Stderr = writer - defer writer.Close() - - if err := monitorCmd.Start(); err != nil { - fmt.Printf("Error executing monitor command: %v\n", err) - return err - } - - fmt.Println("exec:", v2vCmd) - if err := v2vCmd.Run(); err != nil { - fmt.Printf("Error executing v2v command: %v\n", err) - return err - } - - // virt-v2v is done, we can close the pipe to virt-v2v-monitor - writer.Close() - - if err := monitorCmd.Wait(); err != nil { - fmt.Printf("Error waiting for virt-v2v-monitor to finish: %v\n", err) - return err - } - - return nil -} - // VirtV2VPrepEnvironment used in the cold migration. // It creates a links between the downloaded guest image from virt-v2v and mounted PVC. 
@@ -196,67 +198,9 @@ func addCommonArgs(args []string) ([]string, error) {
 	return args, nil
 }
 
-func runVirtV2v() error {
-	args, err := virtV2vBuildCommand()
-	if err != nil {
-		return err
-	}
-	v2vCmd := exec.Command("virt-v2v", args...)
-	// The virt-v2v-monitor reads the virt-v2v stdout and processes it and exposes the progress of the migration.
-	monitorCmd := exec.Command("/usr/local/bin/virt-v2v-monitor")
-	monitorCmd.Stdout = os.Stdout
-	monitorCmd.Stderr = os.Stderr
-
-	var writer *io.PipeWriter
-	monitorCmd.Stdin, writer = io.Pipe()
-	v2vCmd.Stdout = writer
-	v2vCmd.Stderr = writer
-	defer writer.Close()
-
-	if err := monitorCmd.Start(); err != nil {
-		fmt.Printf("Error executing monitor command: %v\n", err)
-		return err
-	}
-
-	fmt.Println("exec:", v2vCmd)
-	if err := v2vCmd.Run(); err != nil {
-		fmt.Printf("Error executing v2v command: %v\n", err)
-		return err
-	}
-
-	// virt-v2v is done, we can close the pipe to virt-v2v-monitor
-	writer.Close()
-
-	if err := monitorCmd.Wait(); err != nil {
-		fmt.Printf("Error waiting for virt-v2v-monitor to finish: %v\n", err)
-		return err
-	}
-
-	return nil
-}
-
 // VirtV2VPrepEnvironment used in the cold migration.
 // It creates a links between the downloaded guest image from virt-v2v and mounted PVC.
 func virtV2VPrepEnvironment() (err error) {
-	source := os.Getenv("V2V_source")
-	_, inplace := os.LookupEnv("V2V_inPlace")
-	if source == global.VSPHERE && !inplace {
-		if _, err := os.Stat("/etc/secret/cacert"); err == nil {
-			// use the specified certificate
-			err = os.Symlink("/etc/secret/cacert", "/opt/ca-bundle.crt")
-			if err != nil {
-				fmt.Println("Error creating ca cert link ", err)
-				os.Exit(1)
-			}
-		} else {
-			// otherwise, keep system pool certificates
-			err := os.Symlink("/etc/pki/tls/certs/ca-bundle.crt.bak", "/opt/ca-bundle.crt")
-			if err != nil {
-				fmt.Println("Error creating ca cert link ", err)
-				os.Exit(1)
-			}
-		}
-	}
 	if err = os.MkdirAll(global.DIR, os.ModePerm); err != nil {
 		return fmt.Errorf("Error creating directory: %v", err)
 	}
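runVirtV2vOVA keeps the established wiring in which virt-v2v's combined stdout and stderr stream into virt-v2v-monitor through an io.Pipe, and the write end is closed only after virt-v2v exits so the monitor sees EOF and can finish. A minimal standalone sketch of that pattern, with sh and cat standing in for virt-v2v and the monitor binary:

package main

import (
	"io"
	"os"
	"os/exec"
)

func main() {
	// Producer emits to both stdout and stderr, like virt-v2v does.
	producer := exec.Command("sh", "-c", "echo progress 10%; echo warning >&2; echo progress 100%")
	monitor := exec.Command("cat") // stand-in for /usr/local/bin/virt-v2v-monitor
	monitor.Stdout = os.Stdout
	monitor.Stderr = os.Stderr

	// Both of the producer's streams feed one pipe; its read end is the
	// monitor's stdin.
	var writer *io.PipeWriter
	monitor.Stdin, writer = io.Pipe()
	producer.Stdout = writer
	producer.Stderr = writer

	if err := monitor.Start(); err != nil {
		panic(err)
	}
	if err := producer.Run(); err != nil {
		panic(err)
	}
	// Producer is done: close the write end so the monitor sees EOF.
	writer.Close()
	if err := monitor.Wait(); err != nil {
		panic(err)
	}
}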