NVMe: Adding support for NDVP
bhatnag authored Aug 9, 2023
1 parent 9602d97 commit 7270530
Showing 8 changed files with 116 additions and 61 deletions.
11 changes: 9 additions & 2 deletions core/orchestrator_core.go
@@ -3441,8 +3441,15 @@ func (o *TridentOrchestrator) AttachVolume(
return utils.MountDevice(ctx, loopDeviceName, mountpoint, publishInfo.SubvolumeMountOptions, isRawBlock)
}
} else {
_, err := utils.AttachISCSIVolumeRetry(ctx, volumeName, mountpoint, publishInfo, map[string]string{},
AttachISCSIVolumeTimeoutLong)
var err error
if publishInfo.SANType == sa.NVMe {
err = utils.AttachNVMeVolumeRetry(ctx, volumeName, mountpoint, publishInfo, map[string]string{}, utils.NVMeAttachTimeout)
}

if publishInfo.SANType == sa.ISCSI {
_, err = utils.AttachISCSIVolumeRetry(ctx, volumeName, mountpoint, publishInfo, map[string]string{},
AttachISCSIVolumeTimeoutLong)
}
return err
}
}
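For orientation, the new branch can be read as a protocol dispatch: the orchestrator now routes the attach call by publishInfo.SANType. The sketch below is a hypothetical restatement of that pattern (attachByProtocol, the stub attach functions, and the trimmed types are not Trident code); a switch with a default also makes an unrecognized SANType an explicit error rather than a silent nil return.

```go
package attachdispatch

import (
	"context"
	"fmt"
)

// Hypothetical stand-ins for the Trident types and helpers involved; only
// what the dispatch itself needs is modeled here.
type VolumePublishInfo struct {
	SANType string
}

const (
	sanTypeISCSI = "iscsi"
	sanTypeNVMe  = "nvme"
)

func attachISCSI(ctx context.Context, name, mountpoint string, info *VolumePublishInfo) error { return nil }
func attachNVMe(ctx context.Context, name, mountpoint string, info *VolumePublishInfo) error  { return nil }

// attachByProtocol mirrors the dispatch AttachVolume now performs: route the
// attach to the protocol named in publishInfo.SANType.
func attachByProtocol(ctx context.Context, name, mountpoint string, info *VolumePublishInfo) error {
	switch info.SANType {
	case sanTypeNVMe:
		return attachNVMe(ctx, name, mountpoint, info)
	case sanTypeISCSI:
		return attachISCSI(ctx, name, mountpoint, info)
	default:
		// Neither protocol matched; surface that instead of returning nil.
		return fmt.Errorf("unsupported SAN type %q", info.SANType)
	}
}
```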
3 changes: 1 addition & 2 deletions frontend/csi/node_server.go
@@ -36,7 +36,6 @@ const (
nvmeSelfHealingLockContext = "NVMeSelfHealingThread"
defaultNodeReconciliationPeriod = 1 * time.Minute
maxJitterValue = 5000
nvmeAttachTimeout = 20 * time.Second
nvmeMaxFlushWaitDuration = 6 * time.Minute
)

@@ -2195,7 +2194,7 @@ func (p *Plugin) nodeStageNVMeVolume(
publishInfo.SANType = req.PublishContext["SANType"]

if err := utils.AttachNVMeVolumeRetry(ctx, req.VolumeContext["internalName"], "", publishInfo, nil,
nvmeAttachTimeout); err != nil {
utils.NVMeAttachTimeout); err != nil {
return err
}

6 changes: 6 additions & 0 deletions storage_drivers/ontap/api/abstraction_rest.go
@@ -460,6 +460,11 @@ func namespaceInfoFromRestAttrsHelper(namespaceGetResponse *models.NvmeNamespace
state = *namespaceGetResponse.Status.State
}

comment := ""
if namespaceGetResponse.Comment != nil {
comment = *namespaceGetResponse.Comment
}

nsInfo := &NVMeNamespace{
UUID: nsUUID,
Name: name,
@@ -468,6 +473,7 @@ func namespaceInfoFromRestAttrsHelper(namespaceGetResponse *models.NvmeNamespace
Size: size,
BlockSize: blockSize,
State: state,
Comment: comment,
}

return nsInfo, nil
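The added Comment handling follows the same nil-guard pattern already used for State and the other optional REST fields. As a hedged aside (not Trident's API), a small generic helper is one way to express that pattern once:

```go
package restutil

// deref returns *p when an optional REST field is present, or fallback when
// the API omitted it (nil pointer). It mirrors the manual guards around
// Comment and State in namespaceInfoFromRestAttrsHelper.
func deref[T any](p *T, fallback T) T {
	if p == nil {
		return fallback
	}
	return *p
}

// Hypothetical usage: comment := deref(namespaceGetResponse.Comment, "")
```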
1 change: 1 addition & 0 deletions storage_drivers/ontap/ontap_common.go
@@ -698,6 +698,7 @@ func PublishLUN(
publishInfo.IscsiTargetPortal = filteredIPs[0]
publishInfo.IscsiPortals = filteredIPs[1:]
publishInfo.IscsiTargetIQN = iSCSINodeName
publishInfo.SANType = sa.ISCSI

if igroupName != "" {
addUniqueIscsiIGroupName(publishInfo, igroupName)
68 changes: 38 additions & 30 deletions storage_drivers/ontap/ontap_san_nvme.go
@@ -720,7 +720,42 @@ func (d *NVMeStorageDriver) Publish(
return errors.UnsupportedError(fmt.Sprintf("the volume %v is not enabled for read or writes", name))
}

if publishInfo.Localhost {
// Get its HostNQN and populate it in publishInfo.
nqn, err := utils.GetHostNqn(ctx)
if err != nil {
return err
}
publishInfo.HostNQN = nqn
}

nsPath := volConfig.InternalID
nsUUID := volConfig.AccessInfo.NVMeNamespaceUUID
// In the Docker context, some attributes, such as fsType and LUKS, need to be
// fetched from the namespace, where they were stored when the namespace was created.
if d.Config.DriverContext == tridentconfig.ContextDocker {
ns, err := d.API.NVMeNamespaceGetByName(ctx, nsPath)
if err != nil {
return fmt.Errorf("Problem fetching namespace %v. Error:%v", nsPath, err)
}
if ns == nil {
return fmt.Errorf("Namespace %v not found", nsPath)
}
nsAttrs, err := d.ParseNVMeNamespaceCommentString(ctx, ns.Comment)
if err != nil {
return fmt.Errorf("failed to parse comment of namespace %v: %v", nsPath, err)
}
publishInfo.FilesystemType = nsAttrs[nsAttributeFSType]
publishInfo.LUKSEncryption = nsAttrs[nsAttributeLUKS]
} else {
publishInfo.FilesystemType = volConfig.FileSystem
publishInfo.LUKSEncryption = volConfig.LUKSEncryption
}

// Get host nqn
if publishInfo.HostNQN == "" {
Logc(ctx).Error("Host NQN is empty")
return fmt.Errorf("hostNQN not found")
} else {
Logc(ctx).Debug("Host NQN is ", publishInfo.HostNQN)
}

// When FS type is RAW, we create a new subsystem per namespace,
// else we use the subsystem created for that particular node
@@ -745,43 +780,15 @@
// Fill important info in publishInfo
publishInfo.NVMeSubsystemNQN = subsystem.NQN
publishInfo.NVMeSubsystemUUID = subsystem.UUID
publishInfo.NVMeNamespaceUUID = volConfig.AccessInfo.NVMeNamespaceUUID
publishInfo.NVMeNamespaceUUID = nsUUID
publishInfo.SANType = d.Config.SANType

// for docker context, some of the attributes like fsType, luks needs to be
// fetched from namespace where they were stored while creating the namespace
if d.Config.DriverContext == tridentconfig.ContextDocker {
ns, err := d.API.NVMeNamespaceGetByName(ctx, nsPath)
if err != nil {
return fmt.Errorf("Problem fetching namespace %v. Error:%v", nsPath, err)
}
if ns != nil {
return fmt.Errorf("Namespace %v not found", nsPath)
}
nsAttrs, err := d.ParseNVMeNamespaceCommentString(ctx, ns.Comment)
publishInfo.FilesystemType = nsAttrs[nsAttributeFSType]
publishInfo.LUKSEncryption = nsAttrs[nsAttributeLUKS]
} else {
publishInfo.FilesystemType = volConfig.FileSystem
publishInfo.LUKSEncryption = volConfig.LUKSEncryption
}

// Get host nqn
if publishInfo.HostNQN == "" {
Logc(ctx).Error("Host NQN is empty")
return fmt.Errorf("hostNQN not found")
} else {
Logc(ctx).Debug("Host NQN is ", publishInfo.HostNQN)
}

// Add HostNQN to the subsystem using api call
if err := d.API.NVMeAddHostToSubsystem(ctx, publishInfo.HostNQN, subsystem.UUID); err != nil {
Logc(ctx).Errorf("add host to subsystem failed, %v", err)
return err
}

nsUUID := volConfig.AccessInfo.NVMeNamespaceUUID

if err := d.API.NVMeEnsureNamespaceMapped(ctx, subsystem.UUID, nsUUID); err != nil {
return err
}
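Publish now sources the host NQN in two ways: for a localhost (Docker) publish it queries the host via utils.GetHostNqn, while in CSI flows the node has already populated publishInfo.HostNQN. GetHostNqn itself is not shown in this diff; the sketch below is a hypothetical illustration of host-side NQN discovery — /etc/nvme/hostnqn, maintained by nvme-cli, is one common source — and is not Trident's implementation.

```go
package hostnqn

import (
	"fmt"
	"os"
	"strings"
)

// discoverHostNQN returns the host's NVMe Qualified Name. Reading
// /etc/nvme/hostnqn (written by nvme-cli) is one common source; real code
// may fall back to other mechanisms such as `nvme gen-hostnqn`.
func discoverHostNQN() (string, error) {
	b, err := os.ReadFile("/etc/nvme/hostnqn")
	if err != nil {
		return "", fmt.Errorf("could not read host NQN: %w", err)
	}
	nqn := strings.TrimSpace(string(b))
	if nqn == "" {
		return "", fmt.Errorf("host NQN file is empty")
	}
	return nqn, nil
}
```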
@@ -1134,7 +1141,8 @@ func (d *NVMeStorageDriver) getVolumeExternal(
BlockSize: "",
FileSystem: "",
}

volumeConfig.AccessInfo.NVMeAccessInfo.NVMeNamespaceUUID = ns.UUID
volumeConfig.InternalID = ns.Name
pool := drivers.UnsetPool
if len(volume.Aggregates) > 0 {
pool = volume.Aggregates[0]
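In the Docker context, Publish recovers fsType and LUKS from the namespace comment via ParseNVMeNamespaceCommentString, which is why abstraction_rest.go now surfaces the Comment field. The exact comment format is not visible in this diff; the sketch below assumes a JSON encoding and hypothetical helper names solely to illustrate the round trip between namespace creation and publish.

```go
package nscomment

import "encoding/json"

// Hypothetical attribute keys; the real driver defines its own
// (nsAttributeFSType, nsAttributeLUKS).
const (
	attrFSType = "fsType"
	attrLUKS   = "LUKS"
)

// encodeComment packs namespace attributes into a string suitable for the
// namespace comment field at create time. JSON is an assumption here.
func encodeComment(fsType, luks string) (string, error) {
	b, err := json.Marshal(map[string]string{
		attrFSType: fsType,
		attrLUKS:   luks,
	})
	if err != nil {
		return "", err
	}
	return string(b), nil
}

// decodeComment reverses encodeComment; a Docker-context publish would
// consult the result when no volConfig attributes are available.
func decodeComment(comment string) (map[string]string, error) {
	attrs := map[string]string{}
	if err := json.Unmarshal([]byte(comment), &attrs); err != nil {
		return nil, err
	}
	return attrs, nil
}
```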
85 changes: 58 additions & 27 deletions storage_drivers/ontap/ontap_san_nvme_test.go
@@ -924,69 +924,100 @@ func TestPublish(t *testing.T) {
UUID: "fakeUUID",
}

namespace := &api.NVMeNamespace{
UUID: "fakeNsUUID",
Comment: "fakeComment",
}
// case 1: error getting volume Info
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, fmt.Errorf("Error Getting Volume Info"))
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, fmt.Errorf("Error Getting Volume Info")).Times(1)

err := d.Publish(ctx, volConfig, publishInfo)

assert.Error(t, err)

// case 2: error getting volume Info
// case 2: volume info succeeds but the DP volume is not read-write, so publish fails
flexVol.AccessType = VolTypeDP
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil)
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil).Times(1)

err = d.Publish(ctx, volConfig, publishInfo)

assert.Error(t, err)

// case 3: Error creating subsystem
// case 3: Error getting namespace in Docker context
flexVol.AccessType = VolTypeRW
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil)
mock.EXPECT().NVMeSubsystemCreate(ctx, "fakeHostName-fakeUUID").Return(subsystem, fmt.Errorf("Error creating subsystem"))
d.Config.DriverContext = tridentconfig.ContextDocker
publishInfo.HostNQN = "fakeHostNQN"
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil).Times(1)
mock.EXPECT().NVMeNamespaceGetByName(ctx, gomock.Any()).Return(nil, fmt.Errorf("Error getting namespace by name")).Times(1)

err = d.Publish(ctx, volConfig, publishInfo)

assert.Error(t, err)

// case 4: Host NQN not found in publish Info
flexVol.AccessType = VolTypeRW
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil)
mock.EXPECT().NVMeSubsystemCreate(ctx, "fakeHostName-fakeUUID").Return(subsystem, nil)
// case 4: Namespace not found (nil) in Docker context
d.Config.DriverContext = tridentconfig.ContextDocker
publishInfo.HostNQN = "fakeHostNQN"
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil).Times(1)
mock.EXPECT().NVMeNamespaceGetByName(ctx, gomock.Any()).Return(nil, nil).Times(1)

err = d.Publish(ctx, volConfig, publishInfo)

assert.Error(t, err)

// case 5: Error while adding host nqn to subsystem
flexVol.AccessType = VolTypeRW
// case 5: Error creating subsystem in Docker context
d.Config.DriverContext = tridentconfig.ContextDocker
publishInfo.HostNQN = "fakeHostNQN"
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil)
mock.EXPECT().NVMeSubsystemCreate(ctx, "fakeHostName-fakeUUID").Return(subsystem, nil)
mock.EXPECT().NVMeAddHostToSubsystem(ctx, publishInfo.HostNQN, subsystem.UUID).Return(fmt.Errorf("Error adding host nqnq to subsystem"))
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil).Times(1)
mock.EXPECT().NVMeNamespaceGetByName(ctx, gomock.Any()).Return(namespace, nil).Times(1)
mock.EXPECT().NVMeSubsystemCreate(ctx, "fakeHostName-fakeUUID").Return(subsystem, fmt.Errorf("Error creating subsystem")).Times(1)

err = d.Publish(ctx, volConfig, publishInfo)

assert.Error(t, err)

// case 6: Error returned by NVMeEnsureNamespaceMapped
flexVol.AccessType = VolTypeRW
publishInfo.HostNQN = "fakeHostNQN"
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil)
mock.EXPECT().NVMeSubsystemCreate(ctx, "fakeHostName-fakeUUID").Return(subsystem, nil)
mock.EXPECT().NVMeAddHostToSubsystem(ctx, publishInfo.HostNQN, subsystem.UUID).Return(nil)
mock.EXPECT().NVMeEnsureNamespaceMapped(ctx, gomock.Any(), gomock.Any()).Return(fmt.Errorf("Error returned by NVMeEnsureNamespaceMapped"))
// case 6: Error creating subsystem in CSI Context
d.Config.DriverContext = tridentconfig.ContextCSI
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil).Times(1)
mock.EXPECT().NVMeSubsystemCreate(ctx, "fakeHostName-fakeUUID").Return(subsystem, fmt.Errorf("Error creating subsystem")).Times(1)

err = d.Publish(ctx, volConfig, publishInfo)

assert.Error(t, err)

// case 7: Success
// case 7: Host NQN not found in publish Info
flexVol.AccessType = VolTypeRW
publishInfo.HostNQN = ""
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil).Times(1)

err = d.Publish(ctx, volConfig, publishInfo)

assert.Error(t, err)

// case 8: Error while adding host nqn to subsystem
publishInfo.HostNQN = "fakeHostNQN"
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil)
mock.EXPECT().NVMeSubsystemCreate(ctx, "fakeHostName-fakeUUID").Return(subsystem, nil)
mock.EXPECT().NVMeAddHostToSubsystem(ctx, publishInfo.HostNQN, subsystem.UUID).Return(nil)
mock.EXPECT().NVMeEnsureNamespaceMapped(ctx, gomock.Any(), gomock.Any()).Return(nil)
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil).Times(1)
mock.EXPECT().NVMeSubsystemCreate(ctx, "fakeHostName-fakeUUID").Return(subsystem, nil).Times(1)
mock.EXPECT().NVMeAddHostToSubsystem(ctx, publishInfo.HostNQN, subsystem.UUID).Return(fmt.Errorf("Error adding host nqnq to subsystem")).Times(1)

err = d.Publish(ctx, volConfig, publishInfo)

assert.Error(t, err)

// case 9: Error returned by NVMeEnsureNamespaceMapped
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil).Times(1)
mock.EXPECT().NVMeSubsystemCreate(ctx, "fakeHostName-fakeUUID").Return(subsystem, nil).Times(1)
mock.EXPECT().NVMeAddHostToSubsystem(ctx, publishInfo.HostNQN, subsystem.UUID).Return(nil).Times(1)
mock.EXPECT().NVMeEnsureNamespaceMapped(ctx, gomock.Any(), gomock.Any()).Return(fmt.Errorf("Error returned by NVMeEnsureNamespaceMapped")).Times(1)

err = d.Publish(ctx, volConfig, publishInfo)

assert.Error(t, err)

// case 10: Success
mock.EXPECT().VolumeInfo(ctx, volConfig.InternalName).Return(flexVol, nil).Times(1)
mock.EXPECT().NVMeSubsystemCreate(ctx, "fakeHostName-fakeUUID").Return(subsystem, nil).Times(1)
mock.EXPECT().NVMeAddHostToSubsystem(ctx, publishInfo.HostNQN, subsystem.UUID).Return(nil).Times(1)
mock.EXPECT().NVMeEnsureNamespaceMapped(ctx, gomock.Any(), gomock.Any()).Return(nil).Times(1)

err = d.Publish(ctx, volConfig, publishInfo)

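The cases above repeat near-identical expectation setups. As a hedged aside, the same coverage could be expressed table-driven; the self-contained sketch below uses a toy fake instead of the generated gomock client, only to show the shape such a refactor could take.

```go
package publishpattern

import (
	"errors"
	"testing"
)

// fakeAPI is a hand-rolled stand-in for the mocked ONTAP API; each field lets
// a test case inject the error it wants a given step to return.
type fakeAPI struct {
	volumeInfoErr      error
	subsystemCreateErr error
	addHostErr         error
}

func (f *fakeAPI) VolumeInfo() error      { return f.volumeInfoErr }
func (f *fakeAPI) SubsystemCreate() error { return f.subsystemCreateErr }
func (f *fakeAPI) AddHost() error         { return f.addHostErr }

// publish mimics the error-propagation order of an NVMe publish call.
func publish(api *fakeAPI) error {
	if err := api.VolumeInfo(); err != nil {
		return err
	}
	if err := api.SubsystemCreate(); err != nil {
		return err
	}
	return api.AddHost()
}

func TestPublishTableDriven(t *testing.T) {
	cases := []struct {
		name    string
		api     *fakeAPI
		wantErr bool
	}{
		{"volume info fails", &fakeAPI{volumeInfoErr: errors.New("boom")}, true},
		{"subsystem create fails", &fakeAPI{subsystemCreateErr: errors.New("boom")}, true},
		{"add host fails", &fakeAPI{addHostErr: errors.New("boom")}, true},
		{"success", &fakeAPI{}, false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := publish(tc.api)
			if (err != nil) != tc.wantErr {
				t.Fatalf("got err=%v, wantErr=%v", err, tc.wantErr)
			}
		})
	}
}
```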
1 change: 1 addition & 0 deletions storage_drivers/solidfire/solidfire_san.go
@@ -1224,6 +1224,7 @@ func (d *SANStorageDriver) Publish(
publishInfo.FilesystemType = fstype
publishInfo.UseCHAP = true
publishInfo.SharedTarget = false
publishInfo.SANType = sa.ISCSI

return nil
}
2 changes: 2 additions & 0 deletions utils/nvme.go
@@ -17,6 +17,8 @@ import (
"github.com/netapp/trident/utils/errors"
)

const NVMeAttachTimeout = 20 * time.Second

// getNVMeSubsystem returns the NVMe subsystem details.
func getNVMeSubsystem(ctx context.Context, subsysNqn string) (*NVMeSubsystem, error) {
Logc(ctx).Debug(">>>> nvme.getNVMeSubsystem")
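Exporting NVMeAttachTimeout lets the CSI node server and the core orchestrator bound the attach retry with the same window. AttachNVMeVolumeRetry itself is not shown in this diff; the sketch below is a hypothetical deadline-bounded retry loop illustrating how such a timeout is typically consumed, not Trident's implementation.

```go
package nvmeattach

import (
	"context"
	"fmt"
	"time"
)

const nvmeAttachTimeout = 20 * time.Second // mirrors utils.NVMeAttachTimeout

// attachWithRetry keeps calling attemptAttach until it succeeds or the
// timeout elapses. The real retry helper may use a different backoff
// strategy; this only illustrates the bounded-retry idea.
func attachWithRetry(ctx context.Context, attemptAttach func(context.Context) error, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	interval := time.Second
	for {
		err := attemptAttach(ctx)
		if err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("NVMe attach did not complete within %v: %w", timeout, err)
		case <-time.After(interval):
			// Retry after a fixed interval; exponential backoff is another option.
		}
	}
}
```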
