From de4a098d7dcd750b89bde82bb867a2f3d44182be Mon Sep 17 00:00:00 2001
From: Diptanu Choudhury
Date: Tue, 9 Aug 2016 14:48:25 -0700
Subject: [PATCH] Added scheduler tests

---
 scheduler/generic_sched.go      |  5 +-
 scheduler/generic_sched_test.go | 99 +++++++++++++++++++++++++++++++++
 scheduler/system_sched.go       |  4 +-
 scheduler/system_sched_test.go  | 59 ++++++++++++++++++++
 4 files changed, 162 insertions(+), 5 deletions(-)

diff --git a/scheduler/generic_sched.go b/scheduler/generic_sched.go
index 31df473da..fb28de234 100644
--- a/scheduler/generic_sched.go
+++ b/scheduler/generic_sched.go
@@ -356,13 +356,12 @@ func (s *GenericScheduler) computeJobAllocs() error {
 			s.eval.JobID, err)
 	}
 
-	// Update the allocations which are in pending/running state on tainted node
-	// to lost
+	// Update the allocations which are in pending/running state on tainted
+	// nodes to lost
 	updateNonTerminalAllocsToLost(s.plan, tainted, allocs)
 
 	// Filter out the allocations in a terminal state
 	allocs = s.filterCompleteAllocs(allocs)
-	s.logger.Printf("len of allocs: %v", len(allocs))
 
 	// Diff the required and existing allocations
 	diff := diffAllocs(s.job, tainted, groups, allocs)
diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go
index f35a7b0ed..6c7d1cdc5 100644
--- a/scheduler/generic_sched_test.go
+++ b/scheduler/generic_sched_test.go
@@ -3,6 +3,7 @@ package scheduler
 import (
 	"fmt"
 	"reflect"
+	"sort"
 	"testing"
 	"time"
 
@@ -1436,6 +1437,104 @@ func TestServiceSched_NodeDrain(t *testing.T) {
 	h.AssertEvalStatus(t, structs.EvalStatusComplete)
 }
 
+func TestServiceSched_NodeDrain_Down(t *testing.T) {
+	h := NewHarness(t)
+
+	// Register a draining node
+	node := mock.Node()
+	node.Drain = true
+	node.Status = structs.NodeStatusDown
+	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+
+	// Generate a fake job with allocations
+	job := mock.Job()
+	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+
+	var allocs []*structs.Allocation
+	for i := 0; i < 10; i++ {
+		alloc := mock.Alloc()
+		alloc.Job = job
+		alloc.JobID = job.ID
+		alloc.NodeID = node.ID
+		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
+		allocs = append(allocs, alloc)
+	}
+	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+
+	// Set the desired state of the allocs to stop
+	var stop []*structs.Allocation
+	for i := 0; i < 10; i++ {
+		newAlloc := allocs[i].Copy()
+		newAlloc.ClientStatus = structs.AllocDesiredStatusStop
+		stop = append(stop, newAlloc)
+	}
+	noErr(t, h.State.UpsertAllocs(h.NextIndex(), stop))
+
+	// Mark some of the allocations as running
+	var running []*structs.Allocation
+	for i := 4; i < 6; i++ {
+		newAlloc := stop[i].Copy()
+		newAlloc.ClientStatus = structs.AllocClientStatusRunning
+		running = append(running, newAlloc)
+	}
+	noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), running))
+
+	// Mark some of the allocations as complete
+	var complete []*structs.Allocation
+	for i := 6; i < 10; i++ {
+		newAlloc := stop[i].Copy()
+		newAlloc.ClientStatus = structs.AllocClientStatusComplete
+		complete = append(complete, newAlloc)
+	}
+	noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), complete))
+
+	// Create a mock evaluation to deal with the node update
+	eval := &structs.Evaluation{
+		ID:          structs.GenerateUUID(),
+		Priority:    50,
+		TriggeredBy: structs.EvalTriggerNodeUpdate,
+		JobID:       job.ID,
+		NodeID:      node.ID,
+	}
+
+	// Process the evaluation
+	err := h.Process(NewServiceScheduler, eval)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Ensure a single plan
+	if len(h.Plans) != 1 {
+		t.Fatalf("bad: %#v", h.Plans)
+	}
+	plan := h.Plans[0]
+
+	// Ensure the plan evicted non-terminal allocs
+	if len(plan.NodeUpdate[node.ID]) != 6 {
+		t.Fatalf("bad: %#v", plan)
+	}
+
+	// Ensure that all the allocations which were in running or pending state
+	// have been marked as lost
+	var lostAllocs []string
+	for _, alloc := range plan.NodeUpdate[node.ID] {
+		lostAllocs = append(lostAllocs, alloc.ID)
+	}
+	sort.Strings(lostAllocs)
+
+	var expectedLostAllocs []string
+	for i := 0; i < 6; i++ {
+		expectedLostAllocs = append(expectedLostAllocs, allocs[i].ID)
+	}
+	sort.Strings(expectedLostAllocs)
+
+	if !reflect.DeepEqual(expectedLostAllocs, lostAllocs) {
+		t.Fatalf("expected: %v, actual: %v", expectedLostAllocs, lostAllocs)
+	}
+
+	h.AssertEvalStatus(t, structs.EvalStatusComplete)
+}
+
 func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) {
 	h := NewHarness(t)
 
diff --git a/scheduler/system_sched.go b/scheduler/system_sched.go
index 75fe9f563..8493fea64 100644
--- a/scheduler/system_sched.go
+++ b/scheduler/system_sched.go
@@ -191,8 +191,8 @@ func (s *SystemScheduler) computeJobAllocs() error {
 			s.eval.JobID, err)
 	}
 
-	// Update the allocations which are in pending/running state on tainted node
-	// to lost
+	// Update the allocations which are in pending/running state on tainted
+	// nodes to lost
 	updateNonTerminalAllocsToLost(s.plan, tainted, allocs)
 
 	// Filter out the allocations in a terminal state
diff --git a/scheduler/system_sched_test.go b/scheduler/system_sched_test.go
index d641f12b2..1efff4cac 100644
--- a/scheduler/system_sched_test.go
+++ b/scheduler/system_sched_test.go
@@ -752,6 +752,65 @@ func TestSystemSched_NodeDown(t *testing.T) {
 	h.AssertEvalStatus(t, structs.EvalStatusComplete)
 }
 
+func TestSystemSched_NodeDrain_Down(t *testing.T) {
+	h := NewHarness(t)
+
+	// Register a draining node
+	node := mock.Node()
+	node.Drain = true
+	node.Status = structs.NodeStatusDown
+	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+
+	// Generate a fake job allocated on that node.
+	job := mock.SystemJob()
+	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+
+	alloc := mock.Alloc()
+	alloc.Job = job
+	alloc.JobID = job.ID
+	alloc.NodeID = node.ID
+	alloc.Name = "my-job.web[0]"
+	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
+
+	// Create a mock evaluation to deal with the node update
+	eval := &structs.Evaluation{
+		ID:          structs.GenerateUUID(),
+		Priority:    50,
+		TriggeredBy: structs.EvalTriggerNodeUpdate,
+		JobID:       job.ID,
+		NodeID:      node.ID,
+	}
+
+	// Process the evaluation
+	err := h.Process(NewSystemScheduler, eval)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Ensure a single plan
+	if len(h.Plans) != 1 {
+		t.Fatalf("bad: %#v", h.Plans)
+	}
+	plan := h.Plans[0]
+
+	// Ensure the plan evicted non-terminal allocs
+	if len(plan.NodeUpdate[node.ID]) != 1 {
+		t.Fatalf("bad: %#v", plan)
+	}
+
+	// Ensure that the allocation is marked as lost
+	var lostAllocs []string
+	for _, alloc := range plan.NodeUpdate[node.ID] {
+		lostAllocs = append(lostAllocs, alloc.ID)
+	}
+	expected := []string{alloc.ID}
+
+	if !reflect.DeepEqual(lostAllocs, expected) {
+		t.Fatalf("expected: %v, actual: %v", expected, lostAllocs)
+	}
+	h.AssertEvalStatus(t, structs.EvalStatusComplete)
+}
+
 func TestSystemSched_NodeDrain(t *testing.T) {
 	h := NewHarness(t)