Skip to content

Commit

Permalink
vSphere: fix crash due to disk without a datastore
Browse files Browse the repository at this point in the history
Previously, we assumed all disks of types
VirtualDiskFlatVer1BackingInfo, VirtualDiskFlatVer2BackingInfo and
VirtualDiskRawDiskMappingVer1BackingInfo are located on a datastore. The
only exception was disks of type VirtualDiskRawDiskVer2BackingInfo for
which we didn't assume they are located on a datastore but for them RDM
is set to true and so we have a critical concern saying VMs with such
disks cannot be migrated.

However, the 'Datastore' property is optional and according to the
vSphere documentation: "If the file is not located on a datastore, then
this reference is null.". We noticed that this could actually happen,
even though we were not able to reproduce it on our development
environments, and once it happens forklift-controller crashes while
creating the inventory.

Therefore this PR introduces the following changes:
1. Make the code that builds the inventory more robust in terms of
   handling VMs with disks that are not located on a datastore
2. Add a critical concern for such VMs with disks that are not located
   on a datastore, saying such VMs cannot be migrated (later on we would
   fail the validation of a plan anyway due to unmapped storage in this
   case, even if we don't block migration of VMs with critical concerns)

Signed-off-by: Arik Hadas <[email protected]>
  • Loading branch information
ahadas committed May 2, 2024
1 parent 81db7e6 commit bc7f240
Show file tree
Hide file tree
Showing 3 changed files with 64 additions and 10 deletions.
26 changes: 16 additions & 10 deletions pkg/controller/provider/container/vsphere/model.go
Original file line number Diff line number Diff line change
Expand Up @@ -688,44 +688,50 @@ func (v *VmAdapter) updateDisks(devArray *types.ArrayOfVirtualDevice) {
switch disk.Backing.(type) {
case *types.VirtualDiskFlatVer1BackingInfo:
backing := disk.Backing.(*types.VirtualDiskFlatVer1BackingInfo)
datastoreId, _ := sanitize(backing.Datastore.Value)
md := model.Disk{
Key: disk.Key,
File: backing.FileName,
Capacity: disk.CapacityInBytes,
Datastore: model.Ref{
}
if backing.Datastore != nil {
datastoreId, _ := sanitize(backing.Datastore.Value)
md.Datastore = model.Ref{
Kind: model.DsKind,
ID: datastoreId,
},
}
}
disks = append(disks, md)
case *types.VirtualDiskFlatVer2BackingInfo:
backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
datastoreId, _ := sanitize(backing.Datastore.Value)
md := model.Disk{
Key: disk.Key,
File: backing.FileName,
Capacity: disk.CapacityInBytes,
Shared: backing.Sharing != "sharingNone",
Datastore: model.Ref{
}
if backing.Datastore != nil {
datastoreId, _ := sanitize(backing.Datastore.Value)
md.Datastore = model.Ref{
Kind: model.DsKind,
ID: datastoreId,
},
}
}
disks = append(disks, md)
case *types.VirtualDiskRawDiskMappingVer1BackingInfo:
backing := disk.Backing.(*types.VirtualDiskRawDiskMappingVer1BackingInfo)
datastoreId, _ := sanitize(backing.Datastore.Value)
md := model.Disk{
Key: disk.Key,
File: backing.FileName,
Capacity: disk.CapacityInBytes,
Shared: backing.Sharing != "sharingNone",
Datastore: model.Ref{
RDM: true,
}
if backing.Datastore != nil {
datastoreId, _ := sanitize(backing.Datastore.Value)
md.Datastore = model.Ref{
Kind: model.DsKind,
ID: datastoreId,
},
RDM: true,
}
}
disks = append(disks, md)
case *types.VirtualDiskRawDiskVer2BackingInfo:
Expand Down
15 changes: 15 additions & 0 deletions validation/policies/io/konveyor/forklift/vmware/datastore.rego
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
package io.konveyor.forklift.vmware

# True when at least one of the VM's disks carries an empty datastore id,
# i.e. the disk file is not located on any datastore.
null_datastore {
    disk := input.disks[_]
    count(disk.datastore.id) == 0
}

# Raise a critical concern for VMs that have a disk without a datastore;
# such disks cannot be mapped to target storage, so the VM is not migratable.
concerns[flag] {
    null_datastore
    flag := {
        "category": "Critical",
        "label": "Disk is not located on a datastore",
        "assessment": "The VM is configured with a disk that is not located on a datastore. The VM cannot be migrated"
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
package io.konveyor.forklift.vmware

# A VM with no disks at all must not trigger the datastore concern.
test_with_no_disks {
    vm := {
        "name": "test",
        "disks": []
    }
    flags := concerns with input as vm
    count(flags) == 0
}

# A VM whose only disk has a non-empty datastore id is unaffected.
test_with_valid_disk {
    vm := {
        "name": "test",
        "disks": [
            { "datastore": { "kind": "datastore", "id": "datastore-1" } }
        ]
    }
    flags := concerns with input as vm
    count(flags) == 0
}

# A VM mixing a valid disk with one whose datastore id is empty must raise
# exactly one concern (concerns is a set, so duplicate flags collapse).
# NOTE: the trailing comma after the last array element was removed — Rego
# (notably older OPA parsers) rejects trailing commas in composite values,
# which would make the entire test file fail to parse.
test_with_invalid_disk {
    mock_vm := {
        "name": "test",
        "disks": [
            { "datastore": { "kind": "datastore", "id": "datastore-1" } },
            { "datastore": { "kind": "", "id": "" } }
        ]
    }
    results := concerns with input as mock_vm
    count(results) == 1
}

0 comments on commit bc7f240

Please sign in to comment.