From bc7f240945451ed39d494f03442fa12122128164 Mon Sep 17 00:00:00 2001
From: Arik Hadas
Date: Tue, 9 Apr 2024 08:42:08 +0300
Subject: [PATCH] vSphere: fix crash due to disk without a datastore

Previously, we assumed that all disks of the types
VirtualDiskFlatVer1BackingInfo, VirtualDiskFlatVer2BackingInfo, and
VirtualDiskRawDiskMappingVer1BackingInfo are located on a datastore. The
only exception was disks of type VirtualDiskRawDiskVer2BackingInfo, which
we did not assume to be located on a datastore; for those, however, RDM is
set to true, so a critical concern already says that VMs with such disks
cannot be migrated.

However, the 'Datastore' property is optional, and according to the
vSphere documentation: "If the file is not located on a datastore, then
this reference is null." We noticed that this can actually happen, even
though we were not able to reproduce it in our development environments,
and when it happens, forklift-controller crashes while creating the
inventory.

Therefore, this PR introduces the following changes:
1. Make the code that builds the inventory more robust in handling VMs
   with disks that are not located on a datastore.
2. Add a critical concern for VMs with disks that are not located on a
   datastore, saying such VMs cannot be migrated (later on we would fail
   the validation of a plan anyway due to unmapped storage in this case,
   even if we don't block migration of VMs with critical concerns).

Signed-off-by: Arik Hadas
---
 .../provider/container/vsphere/model.go       | 26 +++++++++------
 .../konveyor/forklift/vmware/datastore.rego   | 15 +++++++++
 .../forklift/vmware/datastore_test.rego       | 33 +++++++++++++++++++
 3 files changed, 64 insertions(+), 10 deletions(-)
 create mode 100644 validation/policies/io/konveyor/forklift/vmware/datastore.rego
 create mode 100644 validation/policies/io/konveyor/forklift/vmware/datastore_test.rego

diff --git a/pkg/controller/provider/container/vsphere/model.go b/pkg/controller/provider/container/vsphere/model.go
index 45ed63eac..63cb19904 100644
--- a/pkg/controller/provider/container/vsphere/model.go
+++ b/pkg/controller/provider/container/vsphere/model.go
@@ -688,44 +688,50 @@ func (v *VmAdapter) updateDisks(devArray *types.ArrayOfVirtualDevice) {
 		switch disk.Backing.(type) {
 		case *types.VirtualDiskFlatVer1BackingInfo:
 			backing := disk.Backing.(*types.VirtualDiskFlatVer1BackingInfo)
-			datastoreId, _ := sanitize(backing.Datastore.Value)
 			md := model.Disk{
 				Key:      disk.Key,
 				File:     backing.FileName,
 				Capacity: disk.CapacityInBytes,
-				Datastore: model.Ref{
+			}
+			if backing.Datastore != nil {
+				datastoreId, _ := sanitize(backing.Datastore.Value)
+				md.Datastore = model.Ref{
 					Kind: model.DsKind,
 					ID:   datastoreId,
-				},
+				}
 			}
 			disks = append(disks, md)
 		case *types.VirtualDiskFlatVer2BackingInfo:
 			backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
-			datastoreId, _ := sanitize(backing.Datastore.Value)
 			md := model.Disk{
 				Key:      disk.Key,
 				File:     backing.FileName,
 				Capacity: disk.CapacityInBytes,
 				Shared:   backing.Sharing != "sharingNone",
-				Datastore: model.Ref{
+			}
+			if backing.Datastore != nil {
+				datastoreId, _ := sanitize(backing.Datastore.Value)
+				md.Datastore = model.Ref{
 					Kind: model.DsKind,
 					ID:   datastoreId,
-				},
+				}
 			}
 			disks = append(disks, md)
 		case *types.VirtualDiskRawDiskMappingVer1BackingInfo:
 			backing := disk.Backing.(*types.VirtualDiskRawDiskMappingVer1BackingInfo)
-			datastoreId, _ := sanitize(backing.Datastore.Value)
 			md := model.Disk{
 				Key:      disk.Key,
 				File:     backing.FileName,
 				Capacity: disk.CapacityInBytes,
 				Shared:   backing.Sharing != "sharingNone",
-				Datastore: model.Ref{
+				RDM:      true,
+			}
+			if backing.Datastore != nil {
+				datastoreId, _ := sanitize(backing.Datastore.Value)
+				md.Datastore = model.Ref{
 					Kind: model.DsKind,
 					ID:   datastoreId,
-				},
-				RDM: true,
+				}
 			}
 			disks = append(disks, md)
 		case *types.VirtualDiskRawDiskVer2BackingInfo:
diff --git a/validation/policies/io/konveyor/forklift/vmware/datastore.rego b/validation/policies/io/konveyor/forklift/vmware/datastore.rego
new file mode 100644
index 000000000..a888a853c
--- /dev/null
+++ b/validation/policies/io/konveyor/forklift/vmware/datastore.rego
@@ -0,0 +1,15 @@
+package io.konveyor.forklift.vmware
+
+null_datastore {
+    some i
+    count(input.disks[i].datastore.id) == 0
+}
+
+concerns[flag] {
+    null_datastore
+    flag := {
+        "category": "Critical",
+        "label": "Disk is not located on a datastore",
+        "assessment": "The VM is configured with a disk that is not located on a datastore. The VM cannot be migrated"
+    }
+}
diff --git a/validation/policies/io/konveyor/forklift/vmware/datastore_test.rego b/validation/policies/io/konveyor/forklift/vmware/datastore_test.rego
new file mode 100644
index 000000000..00aee91e2
--- /dev/null
+++ b/validation/policies/io/konveyor/forklift/vmware/datastore_test.rego
@@ -0,0 +1,33 @@
+package io.konveyor.forklift.vmware
+
+test_with_no_disks {
+    mock_vm := {
+        "name": "test",
+        "disks": []
+    }
+    results := concerns with input as mock_vm
+    count(results) == 0
+}
+
+test_with_valid_disk {
+    mock_vm := {
+        "name": "test",
+        "disks": [
+            { "datastore": { "kind": "datastore", "id": "datastore-1" } }
+        ]
+    }
+    results := concerns with input as mock_vm
+    count(results) == 0
+}
+
+test_with_invalid_disk {
+    mock_vm := {
+        "name": "test",
+        "disks": [
+            { "datastore": { "kind": "datastore", "id": "datastore-1" } },
+            { "datastore": { "kind": "", "id": "" } },
+        ]
+    }
+    results := concerns with input as mock_vm
+    count(results) == 1
+}
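
A quick way to exercise the new policy and its tests locally is the OPA
CLI (a sketch, assuming `opa` is installed and the command is run from the
repository root; `opa test` is OPA's standard unit-test runner):

    opa test validation/policies/io/konveyor/forklift/vmware -v

On how the two halves fit together: when backing.Datastore is nil, the Go
code above no longer dereferences it and leaves md.Datastore at its zero
value, so the inventory exposes the disk with an empty datastore id. That
is precisely the shape the null_datastore rule matches via
count(input.disks[i].datastore.id) == 0, as mocked by the second disk in
test_with_invalid_disk.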