diff --git a/pkg/controller/provider/container/vsphere/model.go b/pkg/controller/provider/container/vsphere/model.go
index 45ed63eac..63cb19904 100644
--- a/pkg/controller/provider/container/vsphere/model.go
+++ b/pkg/controller/provider/container/vsphere/model.go
@@ -688,44 +688,50 @@ func (v *VmAdapter) updateDisks(devArray *types.ArrayOfVirtualDevice) {
 			switch disk.Backing.(type) {
 			case *types.VirtualDiskFlatVer1BackingInfo:
 				backing := disk.Backing.(*types.VirtualDiskFlatVer1BackingInfo)
-				datastoreId, _ := sanitize(backing.Datastore.Value)
 				md := model.Disk{
 					Key:      disk.Key,
 					File:     backing.FileName,
 					Capacity: disk.CapacityInBytes,
-					Datastore: model.Ref{
+				}
+				if backing.Datastore != nil {
+					datastoreId, _ := sanitize(backing.Datastore.Value)
+					md.Datastore = model.Ref{
 						Kind: model.DsKind,
 						ID:   datastoreId,
-					},
+					}
 				}
 				disks = append(disks, md)
 			case *types.VirtualDiskFlatVer2BackingInfo:
 				backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
-				datastoreId, _ := sanitize(backing.Datastore.Value)
 				md := model.Disk{
 					Key:      disk.Key,
 					File:     backing.FileName,
 					Capacity: disk.CapacityInBytes,
 					Shared:   backing.Sharing != "sharingNone",
-					Datastore: model.Ref{
+				}
+				if backing.Datastore != nil {
+					datastoreId, _ := sanitize(backing.Datastore.Value)
+					md.Datastore = model.Ref{
 						Kind: model.DsKind,
 						ID:   datastoreId,
-					},
+					}
 				}
 				disks = append(disks, md)
 			case *types.VirtualDiskRawDiskMappingVer1BackingInfo:
 				backing := disk.Backing.(*types.VirtualDiskRawDiskMappingVer1BackingInfo)
-				datastoreId, _ := sanitize(backing.Datastore.Value)
 				md := model.Disk{
 					Key:      disk.Key,
 					File:     backing.FileName,
 					Capacity: disk.CapacityInBytes,
 					Shared:   backing.Sharing != "sharingNone",
-					Datastore: model.Ref{
+					RDM:      true,
+				}
+				if backing.Datastore != nil {
+					datastoreId, _ := sanitize(backing.Datastore.Value)
+					md.Datastore = model.Ref{
 						Kind: model.DsKind,
 						ID:   datastoreId,
-					},
-					RDM: true,
+					}
 				}
 				disks = append(disks, md)
 			case *types.VirtualDiskRawDiskVer2BackingInfo:
diff --git a/validation/policies/io/konveyor/forklift/vmware/datastore.rego b/validation/policies/io/konveyor/forklift/vmware/datastore.rego
new file mode 100644
index 000000000..a888a853c
--- /dev/null
+++ b/validation/policies/io/konveyor/forklift/vmware/datastore.rego
@@ -0,0 +1,15 @@
+package io.konveyor.forklift.vmware
+
+null_datastore {
+    some i
+    count(input.disks[i].datastore.id) == 0
+}
+
+concerns[flag] {
+    null_datastore
+    flag := {
+        "category": "Critical",
+        "label": "Disk is not located on a datastore",
+        "assessment": "The VM is configured with a disk that is not located on a datastore. The VM cannot be migrated"
+    }
+}
diff --git a/validation/policies/io/konveyor/forklift/vmware/datastore_test.rego b/validation/policies/io/konveyor/forklift/vmware/datastore_test.rego
new file mode 100644
index 000000000..00aee91e2
--- /dev/null
+++ b/validation/policies/io/konveyor/forklift/vmware/datastore_test.rego
@@ -0,0 +1,33 @@
+package io.konveyor.forklift.vmware
+
+test_with_no_disks {
+    mock_vm := {
+        "name": "test",
+        "disks": []
+    }
+    results := concerns with input as mock_vm
+    count(results) == 0
+}
+
+test_with_valid_disk {
+    mock_vm := {
+        "name": "test",
+        "disks": [
+            { "datastore": { "kind": "datastore", "id": "datastore-1" } }
+        ]
+    }
+    results := concerns with input as mock_vm
+    count(results) == 0
+}
+
+test_with_invalid_disk {
+    mock_vm := {
+        "name": "test",
+        "disks": [
+            { "datastore": { "kind": "datastore", "id": "datastore-1" } },
+            { "datastore": { "kind": "", "id": "" } }
+        ]
+    }
+    results := concerns with input as mock_vm
+    count(results) == 1
+}