From 4dd3ada87fbaee9e50e4a8b2f9dbda34804a73dd Mon Sep 17 00:00:00 2001
From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com>
Date: Mon, 16 Dec 2024 19:18:33 +0100
Subject: [PATCH] test

---
 scheduler/generic_sched.go      |   4 +-
 scheduler/generic_sched_test.go | 118 ++++++++++++++++++++++++++++++++
 2 files changed, 120 insertions(+), 2 deletions(-)

diff --git a/scheduler/generic_sched.go b/scheduler/generic_sched.go
index 81d4a18c64d..256c6272a83 100644
--- a/scheduler/generic_sched.go
+++ b/scheduler/generic_sched.go
@@ -696,8 +696,6 @@ func (s *GenericScheduler) computePlacements(destructive, place []placementResul
 
 			if len(newHostVolumeIDs) > 0 {
 				alloc.HostVolumeIDs = newHostVolumeIDs
-			} else {
-				alloc.HostVolumeIDs = prevAllocation.HostVolumeIDs
 			}
 
 			// If the new allocation is replacing an older allocation then we
@@ -708,6 +706,8 @@ func (s *GenericScheduler) computePlacements(destructive, place []placementResul
 					updateRescheduleTracker(alloc, prevAllocation, now)
 				}
 
+				alloc.HostVolumeIDs = prevAllocation.HostVolumeIDs
+
 				// If the allocation has task handles,
 				// copy them to the new allocation
 				propagateTaskState(alloc, prevAllocation, missing.PreviousLost())
diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go
index adda5e2cb2a..371b75999d1 100644
--- a/scheduler/generic_sched_test.go
+++ b/scheduler/generic_sched_test.go
@@ -218,6 +218,124 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) {
 	}
 }
 
+func TestServiceSched_JobRegister_StickyVolumes(t *testing.T) {
+	ci.Parallel(t)
+
+	h := NewHarness(t)
+
+	nodes := []*structs.Node{
+		mock.Node(),
+		mock.Node(),
+	}
+
+	hostVolCapsReadWrite := []*structs.HostVolumeCapability{
+		{
+			AttachmentMode: structs.HostVolumeAttachmentModeFilesystem,
+			AccessMode:     structs.HostVolumeAccessModeSingleNodeReader,
+		},
+		{
+			AttachmentMode: structs.HostVolumeAttachmentModeFilesystem,
+			AccessMode:     structs.HostVolumeAccessModeSingleNodeWriter,
+		},
+	}
+
+	dhv := &structs.HostVolume{
+		Namespace:             structs.DefaultNamespace,
+		ID:                    uuid.Generate(),
+		Name:                  "foo",
+		NodeID:                nodes[1].ID,
+		RequestedCapabilities: hostVolCapsReadWrite,
+		State:                 structs.HostVolumeStateReady,
+	}
+
+	nodes[0].HostVolumes = map[string]*structs.ClientHostVolumeConfig{}
+	nodes[1].HostVolumes = map[string]*structs.ClientHostVolumeConfig{"foo": {ID: dhv.ID}}
+
+	for _, node := range nodes {
+		must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, 1000, node))
+	}
+	must.NoError(t, h.State.UpsertHostVolume(1000, dhv))
+
+	stickyRequest := map[string]*structs.VolumeRequest{
+		"foo": {
+			Type:           "host",
+			Source:         "foo",
+			Sticky:         true,
+			AccessMode:     structs.CSIVolumeAccessModeSingleNodeWriter,
+			AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
+		},
+	}
+
+	// Create a job
+	job := mock.Job()
+	job.TaskGroups[0].Volumes = stickyRequest
+	must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job))
+
+	// Create a mock evaluation to register the job
+	eval := &structs.Evaluation{
+		Namespace:   structs.DefaultNamespace,
+		ID:          uuid.Generate(),
+		Priority:    job.Priority,
+		TriggeredBy: structs.EvalTriggerJobRegister,
+		JobID:       job.ID,
+		Status:      structs.EvalStatusPending,
+	}
+	must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
+
+	// Process the evaluation
+	must.NoError(t, h.Process(NewServiceScheduler, eval))
+
+	// Ensure the plan allocated
+	plan := h.Plans[0]
+	planned := make(map[string]*structs.Allocation)
+	for _, allocList := range plan.NodeAllocation {
+		for _, alloc := range allocList {
+			planned[alloc.ID] = alloc
+		}
+	}
+	must.MapLen(t, 10, planned)
+
+	// Ensure that the allocations got the host volume ID added
+	for _, p := range planned {
+		must.Eq(t, p.PreviousAllocation, "")
+		must.Eq(t, p.HostVolumeIDs[0], dhv.ID)
+	}
+
+	// Update the job to force a rolling upgrade
+	updated := job.Copy()
+	updated.TaskGroups[0].Tasks[0].Resources.CPU += 10
+	must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, updated))
+
+	// Create a mock evaluation to handle the update
+	eval = &structs.Evaluation{
+		Namespace:   structs.DefaultNamespace,
+		ID:          uuid.Generate(),
+		Priority:    job.Priority,
+		TriggeredBy: structs.EvalTriggerNodeUpdate,
+		JobID:       job.ID,
+		Status:      structs.EvalStatusPending,
+	}
+	must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
+	h1 := NewHarnessWithState(t, h.State)
+	must.NoError(t, h1.Process(NewServiceScheduler, eval))
+
+	// Ensure we have created only one new allocation
+	// Ensure a single plan
+	must.SliceLen(t, 1, h1.Plans)
+	plan = h1.Plans[0]
+	var newPlanned []*structs.Allocation
+	for _, allocList := range plan.NodeAllocation {
+		newPlanned = append(newPlanned, allocList...)
+	}
+	must.SliceLen(t, 10, newPlanned)
+
+	// Ensure that the new allocations retain the host volume ID
+	for _, new := range newPlanned {
+		must.NotEq(t, new.PreviousAllocation, "")
+		must.Eq(t, new.HostVolumeIDs[0], dhv.ID)
+	}
+}
+
 func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) {
 	ci.Parallel(t)