From 648bacda778b07142c91f46aaf37d5fae8528f8f Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Wed, 4 Jun 2025 09:29:28 +0200 Subject: [PATCH] testing: migrate nomad/scheduler off of testify (#25968) In the spirit of #25909, this PR removes testify dependencies from the scheduler package, along with reflect.DeepEqual removal. This is again a combination of semgrep and hx editing magic. --------- Co-authored-by: Tim Gross --- .semgrep/imports.yml | 1 + scheduler/annotate_test.go | 17 +- scheduler/benchmarks/helpers_test.go | 6 +- scheduler/context_test.go | 27 +- scheduler/device_test.go | 54 +- scheduler/feasible_test.go | 21 +- scheduler/generic_sched_test.go | 1055 +++++++++++++------------- scheduler/numa_ce_test.go | 8 +- scheduler/preemption_test.go | 102 +-- scheduler/rank_test.go | 117 ++- scheduler/reconcile_test.go | 8 +- scheduler/scheduler_sysbatch_test.go | 501 ++++++------ scheduler/scheduler_system_test.go | 579 +++++++------- scheduler/select_test.go | 9 +- scheduler/spread_test.go | 53 +- scheduler/stack_test.go | 10 +- scheduler/testing.go | 7 +- scheduler/util_test.go | 174 +++-- 18 files changed, 1306 insertions(+), 1443 deletions(-) diff --git a/.semgrep/imports.yml b/.semgrep/imports.yml index 032e1d5e3..7e8a83b3a 100644 --- a/.semgrep/imports.yml +++ b/.semgrep/imports.yml @@ -29,3 +29,4 @@ rules: paths: include: - "nomad/state/*_test.go" + - "nomad/scheduler/*_test.go" diff --git a/scheduler/annotate_test.go b/scheduler/annotate_test.go index 09d8880b6..4adc0de29 100644 --- a/scheduler/annotate_test.go +++ b/scheduler/annotate_test.go @@ -4,11 +4,11 @@ package scheduler import ( - "reflect" "testing" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" + "github.com/shoenig/test/must" ) func TestAnnotateTaskGroup_Updates(t *testing.T) { @@ -50,9 +50,8 @@ func TestAnnotateTaskGroup_Updates(t *testing.T) { t.Fatalf("annotateTaskGroup(%#v, %#v) failed: %#v", tgDiff, annotations, err) } - if !reflect.DeepEqual(tgDiff, expected) { - t.Fatalf("got %#v, want %#v", tgDiff, expected) - } + must.Eq(t, expected, tgDiff) + } func TestAnnotateCountChange_NonEdited(t *testing.T) { @@ -61,9 +60,8 @@ func TestAnnotateCountChange_NonEdited(t *testing.T) { tg := &structs.TaskGroupDiff{} tgOrig := &structs.TaskGroupDiff{} annotateCountChange(tg) - if !reflect.DeepEqual(tgOrig, tg) { - t.Fatalf("annotateCountChange(%#v) should not have caused any annotation: %#v", tgOrig, tg) - } + must.Eq(t, tgOrig, tg) + } func TestAnnotateCountChange(t *testing.T) { @@ -116,9 +114,8 @@ func TestAnnotateTask_NonEdited(t *testing.T) { td := &structs.TaskDiff{Type: structs.DiffTypeNone} tdOrig := &structs.TaskDiff{Type: structs.DiffTypeNone} annotateTask(td, tgd) - if !reflect.DeepEqual(tdOrig, td) { - t.Fatalf("annotateTask(%#v) should not have caused any annotation: %#v", tdOrig, td) - } + must.Eq(t, tdOrig, td) + } func TestAnnotateTask(t *testing.T) { diff --git a/scheduler/benchmarks/helpers_test.go b/scheduler/benchmarks/helpers_test.go index 956846e11..5d340b153 100644 --- a/scheduler/benchmarks/helpers_test.go +++ b/scheduler/benchmarks/helpers_test.go @@ -16,7 +16,7 @@ import ( "github.com/hashicorp/nomad/helper/raftutil" "github.com/hashicorp/nomad/scheduler" - "github.com/stretchr/testify/require" + "github.com/shoenig/test/must" ) // NewBenchmarkingHarness creates a starting test harness with state @@ -30,13 +30,13 @@ func NewBenchmarkingHarness(t testing.TB) *scheduler.Harness { datadir := 
os.Getenv("NOMAD_BENCHMARK_DATADIR") if datadir != "" { h, err := NewHarnessFromDataDir(t, datadir) - require.NoError(t, err) + must.NoError(t, err) return h } else { snapshotPath := os.Getenv("NOMAD_BENCHMARK_SNAPSHOT") if snapshotPath != "" { h, err := NewHarnessFromSnapshot(t, snapshotPath) - require.NoError(t, err) + must.NoError(t, err) return h } } diff --git a/scheduler/context_test.go b/scheduler/context_test.go index 5fe23e2a8..4346976e6 100644 --- a/scheduler/context_test.go +++ b/scheduler/context_test.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" "github.com/shoenig/test/must" - "github.com/stretchr/testify/require" ) func testContext(t testing.TB) (*state.StateStore, *EvalContext) { @@ -160,9 +159,9 @@ func TestEvalContext_ProposedAlloc(t *testing.T) { ClientStatus: structs.AllocClientStatusPending, TaskGroup: "web", } - require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) - require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + must.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) + must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) // Add a planned eviction to alloc1 plan := ctx.Plan() @@ -299,10 +298,10 @@ func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) { ClientStatus: structs.AllocClientStatusPending, TaskGroup: "web", } - require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(allocEvict.JobID))) - require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPreempt.JobID))) - require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPropose.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{allocEvict, allocPreempt, allocPropose})) + must.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(allocEvict.JobID))) + must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPreempt.JobID))) + must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPropose.JobID))) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{allocEvict, allocPreempt, allocPropose})) // Plan to evict one alloc and preempt another plan := ctx.Plan() @@ -310,8 +309,8 @@ func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) { plan.NodeUpdate[nodes[0].Node.ID] = []*structs.Allocation{allocPreempt} proposed, err := ctx.ProposedAllocs(nodes[0].Node.ID) - require.NoError(t, err) - require.Len(t, proposed, 1) + must.NoError(t, err) + must.SliceLen(t, 1, proposed) } func TestEvalEligibility_JobStatus(t *testing.T) { @@ -431,7 +430,7 @@ func TestEvalEligibility_GetClasses(t *testing.T) { } actClasses := e.GetClasses() - require.Equal(t, expClasses, actClasses) + must.Eq(t, expClasses, actClasses) } func TestEvalEligibility_GetClasses_JobEligible_TaskGroupIneligible(t *testing.T) { ci.Parallel(t) @@ -455,7 +454,7 @@ func TestEvalEligibility_GetClasses_JobEligible_TaskGroupIneligible(t *testing.T } actClasses := e.GetClasses() - require.Equal(t, expClasses, actClasses) + must.Eq(t, expClasses, actClasses) } func TestPortCollisionEvent_Copy(t *testing.T) { @@ -503,6 +502,6 @@ func TestPortCollisionEvent_Sanitize(t *testing.T) { } cleanEv := ev.Sanitize() - require.Empty(t, 
cleanEv.Node.SecretID) - require.Nil(t, cleanEv.Allocations[0].Job) + must.Eq(t, "", cleanEv.Node.SecretID) + must.Nil(t, cleanEv.Allocations[0].Job) } diff --git a/scheduler/device_test.go b/scheduler/device_test.go index b188b6059..3345b48d3 100644 --- a/scheduler/device_test.go +++ b/scheduler/device_test.go @@ -14,7 +14,6 @@ import ( "github.com/hashicorp/nomad/nomad/structs" psstructs "github.com/hashicorp/nomad/plugins/shared/structs" "github.com/shoenig/test/must" - "github.com/stretchr/testify/require" ) func anyMemoryNodeMatcher() *memoryNodeMatcher { @@ -103,68 +102,64 @@ func collectInstanceIDs(devices ...*structs.NodeDeviceResource) []string { func TestDeviceAllocator_Allocate_GenericRequest(t *testing.T) { ci.Parallel(t) - require := require.New(t) _, ctx := testContext(t) n := devNode() d := newDeviceAllocator(ctx, n) - require.NotNil(d) + must.NotNil(t, d) // Build the request ask := deviceRequest("gpu", 1, nil, nil) mem := anyMemoryNodeMatcher() out, score, err := d.createOffer(mem, ask) - require.NotNil(out) - require.Zero(score) - require.NoError(err) + must.NotNil(t, out) + must.Zero(t, score) + must.NoError(t, err) // Check that we got the nvidia device - require.Len(out.DeviceIDs, 1) - require.Contains(collectInstanceIDs(n.NodeResources.Devices[0]), out.DeviceIDs[0]) + must.SliceLen(t, 1, out.DeviceIDs) + must.SliceContains(t, collectInstanceIDs(n.NodeResources.Devices[0]), out.DeviceIDs[0]) } // Test that asking for a device that is fully specified works. func TestDeviceAllocator_Allocate_FullyQualifiedRequest(t *testing.T) { ci.Parallel(t) - require := require.New(t) _, ctx := testContext(t) n := devNode() d := newDeviceAllocator(ctx, n) - require.NotNil(d) + must.NotNil(t, d) // Build the request ask := deviceRequest("intel/fpga/F100", 1, nil, nil) mem := anyMemoryNodeMatcher() out, score, err := d.createOffer(mem, ask) - require.NotNil(out) - require.Zero(score) - require.NoError(err) + must.NotNil(t, out) + must.Zero(t, score) + must.NoError(t, err) // Check that we got the nvidia device - require.Len(out.DeviceIDs, 1) - require.Contains(collectInstanceIDs(n.NodeResources.Devices[1]), out.DeviceIDs[0]) + must.SliceLen(t, 1, out.DeviceIDs) + must.SliceContains(t, collectInstanceIDs(n.NodeResources.Devices[1]), out.DeviceIDs[0]) } // Test that asking for a device with too much count doesn't place func TestDeviceAllocator_Allocate_NotEnoughInstances(t *testing.T) { ci.Parallel(t) - require := require.New(t) _, ctx := testContext(t) n := devNode() d := newDeviceAllocator(ctx, n) - require.NotNil(d) + must.NotNil(t, d) // Build the request ask := deviceRequest("gpu", 4, nil, nil) mem := anyMemoryNodeMatcher() out, _, err := d.createOffer(mem, ask) - require.Nil(out) - require.Error(err) - require.Contains(err.Error(), "no devices match request") + must.Nil(t, out) + must.ErrorContains(t, err, "no devices match request") } func TestDeviceAllocator_Allocate_NUMA_available(t *testing.T) { @@ -338,14 +333,14 @@ func TestDeviceAllocator_Allocate_Constraints(t *testing.T) { mem := anyMemoryNodeMatcher() out, score, err := d.createOffer(mem, ask) if c.NoPlacement { - require.Nil(t, out) + must.Nil(t, out) } else { must.NotNil(t, out) must.Zero(t, score) must.NoError(t, err) // Check that we got the right nvidia device instance, and // specific device instance IDs if required must.Len(t, 1, out.DeviceIDs) must.SliceContains(t, collectInstanceIDs(c.ExpectedDevice), out.DeviceIDs[0]) must.SliceContainsSubset(t, c.ExpectedDeviceIDs,
out.DeviceIDs) @@ -434,27 +429,26 @@ func TestDeviceAllocator_Allocate_Affinities(t *testing.T) { for _, c := range cases { t.Run(c.Name, func(t *testing.T) { - require := require.New(t) _, ctx := testContext(t) d := newDeviceAllocator(ctx, n) - require.NotNil(d) + must.NotNil(t, d) // Build the request ask := deviceRequest(c.Name, 1, nil, c.Affinities) mem := anyMemoryNodeMatcher() out, score, err := d.createOffer(mem, ask) - require.NotNil(out) - require.NoError(err) + must.NotNil(t, out) + must.NoError(t, err) if c.ZeroScore { - require.Zero(score) + must.Zero(t, score) } else { - require.NotZero(score) + must.NonZero(t, score) } // Check that we got the nvidia device - require.Len(out.DeviceIDs, 1) - require.Contains(collectInstanceIDs(c.ExpectedDevice), out.DeviceIDs[0]) + must.SliceLen(t, 1, out.DeviceIDs) + must.SliceContains(t, collectInstanceIDs(c.ExpectedDevice), out.DeviceIDs[0]) }) } } diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index da7e09072..20079487e 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -5,7 +5,6 @@ package scheduler import ( "fmt" - "reflect" "testing" "time" @@ -64,9 +63,8 @@ func TestStaticIterator_SetNodes(t *testing.T) { static.SetNodes(newNodes) out := collectFeasible(static) - if !reflect.DeepEqual(out, newNodes) { - t.Fatalf("bad: %#v", out) - } + must.Eq(t, newNodes, out) + } func TestRandomIterator(t *testing.T) { @@ -86,9 +84,8 @@ func TestRandomIterator(t *testing.T) { if len(out) != len(nodes) { t.Fatalf("missing nodes") } - if reflect.DeepEqual(out, nodes) { - t.Fatalf("same order") - } + must.NotEq(t, nodes, out) + } func TestHostVolumeChecker_Static(t *testing.T) { @@ -1360,7 +1357,7 @@ func TestResolveConstraintTarget(t *testing.T) { type tcase struct { target string node *structs.Node - val interface{} + val string result bool } node := mock.Node() @@ -1422,11 +1419,9 @@ func TestResolveConstraintTarget(t *testing.T) { for _, tc := range cases { res, ok := resolveTarget(tc.target, tc.node) - if ok != tc.result { - t.Fatalf("TC: %#v, Result: %v %v", tc, res, ok) - } - if ok && !reflect.DeepEqual(res, tc.val) { - t.Fatalf("TC: %#v, Result: %v %v", tc, res, ok) + must.Eq(t, ok, tc.result) + if ok { + must.Eq(t, res, tc.val) } } } diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go index 50979cc5a..3f2e1570d 100644 --- a/scheduler/generic_sched_test.go +++ b/scheduler/generic_sched_test.go @@ -20,8 +20,6 @@ import ( "github.com/hashicorp/nomad/nomad/structs" "github.com/shoenig/test" "github.com/shoenig/test/must" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestServiceSched_JobRegister(t *testing.T) { @@ -30,14 +28,14 @@ func TestServiceSched_JobRegister(t *testing.T) { h := NewHarness(t) // Create some nodes - for i := 0; i < 10; i++ { + for range 10 { node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create a job job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -49,7 +47,7 @@ func TestServiceSched_JobRegister(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), 
[]*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -89,7 +87,7 @@ func TestServiceSched_JobRegister(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed if len(out) != 10 { @@ -132,13 +130,13 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create a job job := mock.Job() job.TaskGroups[0].EphemeralDisk.Sticky = true - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -149,7 +147,7 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation if err := h.Process(NewServiceScheduler, eval); err != nil { @@ -171,7 +169,7 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { // Update the job to force a rolling upgrade updated := job.Copy() updated.TaskGroups[0].Tasks[0].Resources.CPU += 10 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, updated)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, updated)) // Create a mock evaluation to handle the update eval = &structs.Evaluation{ @@ -182,7 +180,7 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) h1 := NewHarnessWithState(t, h.State) if err := h1.Process(NewServiceScheduler, eval); err != nil { t.Fatalf("err: %v", err) @@ -334,14 +332,14 @@ func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { // Create a node node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a job with count 2 and disk as 60GB so that only one allocation // can fit job := mock.Job() job.TaskGroups[0].Count = 2 job.TaskGroups[0].EphemeralDisk.SizeMB = 88 * 1024 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -353,7 +351,7 @@ func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, 
h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -393,7 +391,7 @@ func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure only one allocation was placed if len(out) != 1 { @@ -411,7 +409,7 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create a job that uses distinct host and has count 1 higher than what is @@ -419,7 +417,7 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { job := mock.Job() job.TaskGroups[0].Count = 11 job.Constraints = append(job.Constraints, &structs.Constraint{Operand: structs.ConstraintDistinctHosts}) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -431,7 +429,7 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -468,7 +466,7 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed if len(out) != 10 { @@ -500,7 +498,7 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { rack = "rack1" } node.Meta["rack"] = rack - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create a job that uses distinct property and has count higher than what is @@ -513,7 +511,7 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { LTarget: "${meta.rack}", RTarget: "2", }) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -525,7 +523,7 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -567,7 +565,7 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // 
Ensure all allocations placed if len(out) != 4 { @@ -595,7 +593,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { for i := 0; i < 2; i++ { node := mock.Node() node.Meta["ssd"] = "true" - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create a job that uses distinct property only on one task group. @@ -610,7 +608,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { job.TaskGroups[1].Name = "tg2" job.TaskGroups[1].Count = 2 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -621,7 +619,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -657,7 +655,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed if len(out) != 3 { @@ -671,7 +669,6 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) ci.Parallel(t) h := NewHarness(t) - assert := assert.New(t) // Create a job that uses distinct property over the node-id job := mock.Job() @@ -681,14 +678,14 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) Operand: structs.ConstraintDistinctProperty, LTarget: "${node.unique.id}", }) - assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job), "UpsertJob") + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create some nodes var nodes []*structs.Node for i := 0; i < 6; i++ { node := mock.Node() nodes = append(nodes, node) - assert.Nil(h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node), "UpsertNode") + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create some allocations @@ -701,12 +698,12 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - assert.Nil(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs), "UpsertAllocs") + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the count job2 := job.Copy() job2.TaskGroups[0].Count = 6 - assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2), "UpsertJob") + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -717,35 +714,35 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, 
h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation - assert.Nil(h.Process(NewServiceScheduler, eval), "Process") + must.NoError(t, h.Process(NewServiceScheduler, eval)) // Ensure a single plan - assert.Len(h.Plans, 1, "Number of plans") + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan doesn't have annotations. - assert.Nil(plan.Annotations, "Plan.Annotations") + must.Nil(t, plan.Annotations) // Ensure the eval hasn't spawned blocked eval - assert.Len(h.CreateEvals, 0, "Created Evals") + must.Len(t, 0, h.CreateEvals) // Ensure the plan allocated var planned []*structs.Allocation for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) } - assert.Len(planned, 6, "Planned Allocations") + must.Len(t, 6, planned) // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - assert.Nil(err, "AllocsByJob") + must.NoError(t, err) // Ensure all allocations placed - assert.Len(out, 6, "Placed Allocations") + must.Len(t, 6, out) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -754,8 +751,6 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) func TestServiceSched_Spread(t *testing.T) { ci.Parallel(t) - assert := assert.New(t) - start := uint8(100) step := uint8(10) @@ -783,7 +778,7 @@ func TestServiceSched_Spread(t *testing.T) { }, }, }) - assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job), "UpsertJob") + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create some nodes, half in dc2 var nodes []*structs.Node nodeMap := make(map[string]*structs.Node) @@ -797,7 +792,7 @@ func TestServiceSched_Spread(t *testing.T) { node.NodeResources.MinDynamicPort = 20000 node.NodeResources.MaxDynamicPort = 20005 nodes = append(nodes, node) - assert.Nil(h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node), "UpsertNode") + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) nodeMap[node.ID] = node } @@ -810,20 +805,20 @@ func TestServiceSched_Spread(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation - assert.Nil(h.Process(NewServiceScheduler, eval), "Process") + must.NoError(t, h.Process(NewServiceScheduler, eval)) // Ensure a single plan - assert.Len(h.Plans, 1, "Number of plans") + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan doesn't have annotations. 
- assert.Nil(plan.Annotations, "Plan.Annotations") + must.Nil(t, plan.Annotations) // Ensure the eval hasn't spawned blocked eval - assert.Len(h.CreateEvals, 0, "Created Evals") + must.Len(t, 0, h.CreateEvals) // Ensure the plan allocated var planned []*structs.Allocation @@ -835,14 +830,14 @@ func TestServiceSched_Spread(t *testing.T) { c += len(allocList) dcAllocsMap[dc] = c } - assert.Len(planned, 10, "Planned Allocations") + must.Len(t, 10, planned) expectedCounts := make(map[string]int) expectedCounts["dc1"] = 10 - i if i > 0 { expectedCounts["dc2"] = i } - require.Equal(t, expectedCounts, dcAllocsMap) + must.Eq(t, expectedCounts, dcAllocsMap) h.AssertEvalStatus(t, structs.EvalStatusComplete) }) @@ -1070,8 +1065,8 @@ func TestServiceSched_JobRegister_NodePool_Downgrade(t *testing.T) { must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) processErr := h.Process(NewServiceScheduler, eval) - require.NoError(t, processErr, "failed to process eval") - require.Len(t, h.Plans, 1) + must.NoError(t, processErr, must.Sprint("failed to process eval")) + must.SliceLen(t, 1, h.Plans) // Verify the plan places the new allocation in the spread node pool and // the replacement failure from the previous version in the binpack pool. @@ -1098,8 +1093,6 @@ func TestServiceSched_JobRegister_NodePool_Downgrade(t *testing.T) { func TestServiceSched_EvenSpread(t *testing.T) { ci.Parallel(t) - assert := assert.New(t) - h := NewHarness(t) // Create a job that uses even spread over data center job := mock.Job() @@ -1110,7 +1103,7 @@ func TestServiceSched_EvenSpread(t *testing.T) { Attribute: "${node.datacenter}", Weight: 100, }) - assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job), "UpsertJob") + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create some nodes, half in dc2 var nodes []*structs.Node nodeMap := make(map[string]*structs.Node) @@ -1120,7 +1113,7 @@ func TestServiceSched_EvenSpread(t *testing.T) { node.Datacenter = "dc2" } nodes = append(nodes, node) - assert.Nil(h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node), "UpsertNode") + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) nodeMap[node.ID] = node } @@ -1133,20 +1126,20 @@ func TestServiceSched_EvenSpread(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation - assert.Nil(h.Process(NewServiceScheduler, eval), "Process") + must.NoError(t, h.Process(NewServiceScheduler, eval)) // Ensure a single plan - assert.Len(h.Plans, 1, "Number of plans") + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan doesn't have annotations. 
- assert.Nil(plan.Annotations, "Plan.Annotations") + must.Nil(t, plan.Annotations) // Ensure the eval hasn't spawned blocked eval - assert.Len(h.CreateEvals, 0, "Created Evals") + must.Len(t, 0, h.CreateEvals) // Ensure the plan allocated var planned []*structs.Allocation @@ -1158,14 +1151,14 @@ func TestServiceSched_EvenSpread(t *testing.T) { c += len(allocList) dcAllocsMap[dc] = c } - assert.Len(planned, 10, "Planned Allocations") + must.Len(t, 10, planned) // Expect even split allocs across datacenter expectedCounts := make(map[string]int) expectedCounts["dc1"] = 5 expectedCounts["dc2"] = 5 - require.Equal(t, expectedCounts, dcAllocsMap) + must.Eq(t, expectedCounts, dcAllocsMap) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -1178,12 +1171,12 @@ func TestServiceSched_JobRegister_Annotate(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create a job job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1195,7 +1188,7 @@ func TestServiceSched_JobRegister_Annotate(t *testing.T) { AnnotatePlan: true, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1221,7 +1214,7 @@ func TestServiceSched_JobRegister_Annotate(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed if len(out) != 10 { @@ -1246,9 +1239,8 @@ func TestServiceSched_JobRegister_Annotate(t *testing.T) { } expected := &structs.DesiredUpdates{Place: 10} - if !reflect.DeepEqual(desiredChanges, expected) { - t.Fatalf("Unexpected desired updates; got %#v; want %#v", desiredChanges, expected) - } + must.Eq(t, expected, desiredChanges) + } func TestServiceSched_JobRegister_CountZero(t *testing.T) { @@ -1259,13 +1251,13 @@ func TestServiceSched_JobRegister_CountZero(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create a job and set the task group count to zero. 
job := mock.Job() job.TaskGroups[0].Count = 0 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1276,7 +1268,7 @@ func TestServiceSched_JobRegister_CountZero(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1292,7 +1284,7 @@ func TestServiceSched_JobRegister_CountZero(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure no allocations placed if len(out) != 0 { @@ -1310,7 +1302,7 @@ func TestServiceSched_JobRegister_AllocFail(t *testing.T) { // Create NO nodes // Create a job job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1322,7 +1314,7 @@ func TestServiceSched_JobRegister_AllocFail(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1393,17 +1385,17 @@ func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) { }, } node.ComputeClass() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create an ineligible node node2 := mock.Node() node2.Attributes["kernel.name"] = "windows" node2.ComputeClass() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) // Create a jobs job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1415,7 +1407,7 @@ func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1490,8 +1482,8 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { // Create one node node := mock.Node() node.NodeClass = "class_0" - require.NoError(t, node.ComputeClass()) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, node.ComputeClass()) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a 
job that constrains on a node class job := mock.Job() @@ -1507,7 +1499,7 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { tg2.Name = "web2" tg2.Constraints[1].RTarget = "class_1" job.TaskGroups = append(job.TaskGroups, tg2) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1518,7 +1510,7 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -1543,7 +1535,7 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { // Ensure two allocations placed ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) if len(out) != 2 { t.Fatalf("bad: %#v", out) } @@ -1748,7 +1740,7 @@ func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) { // Create a job and set the task group count to zero. job := mock.Job() job.TaskGroups[0].Count = 0 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock blocked evaluation eval := &structs.Evaluation{ @@ -1761,7 +1753,7 @@ func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) { } // Insert it into the state store - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1828,7 +1820,7 @@ func TestServiceSched_Plan_Partial_Progress(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure only one allocations placed must.SliceLen(t, 1, out) @@ -1847,7 +1839,7 @@ func TestServiceSched_EvaluateBlockedEval(t *testing.T) { // Create a job job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock blocked evaluation eval := &structs.Evaluation{ @@ -1860,7 +1852,7 @@ func TestServiceSched_EvaluateBlockedEval(t *testing.T) { } // Insert it into the state store - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1895,12 +1887,12 @@ func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create a job and set 
the task group count to zero. job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock blocked evaluation eval := &structs.Evaluation{ @@ -1913,7 +1905,7 @@ func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) { } // Insert it into the state store - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1953,7 +1945,7 @@ func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed if len(out) != 10 { @@ -1984,12 +1976,12 @@ func TestServiceSched_JobModify(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Generate a fake job with allocations job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -2000,7 +1992,7 @@ func TestServiceSched_JobModify(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Add a few terminal status allocations, these should be ignored var terminal []*structs.Allocation @@ -2014,7 +2006,7 @@ func TestServiceSched_JobModify(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusFailed // #10446 terminal = append(terminal, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) // Update the job job2 := mock.Job() @@ -2022,7 +2014,7 @@ func TestServiceSched_JobModify(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -2033,7 +2025,7 @@ func TestServiceSched_JobModify(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2068,7 +2060,7 @@ func TestServiceSched_JobModify(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // 
Ensure all allocations placed out, _ = structs.FilterTerminalAllocs(out) @@ -2383,8 +2375,6 @@ func TestServiceSched_JobModify_Datacenters(t *testing.T) { h := NewHarness(t) - require := require.New(t) - // Create some nodes in 3 DCs var nodes []*structs.Node for i := 1; i < 4; i++ { @@ -2398,7 +2388,7 @@ func TestServiceSched_JobModify_Datacenters(t *testing.T) { job := mock.Job() job.TaskGroups[0].Count = 3 job.Datacenters = []string{"dc1", "dc2", "dc3"} - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 3; i++ { @@ -2409,13 +2399,13 @@ func TestServiceSched_JobModify_Datacenters(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job to 2 DCs job2 := job.Copy() job2.TaskGroups[0].Count = 4 job2.Datacenters = []string{"dc1", "dc2"} - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -2426,35 +2416,35 @@ func TestServiceSched_JobModify_Datacenters(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) - require.NoError(err) + must.NoError(t, err) h.AssertEvalStatus(t, structs.EvalStatusComplete) // Ensure a single plan - require.Len(h.Plans, 1) + must.SliceLen(t, 1, h.Plans) plan := h.Plans[0] - require.Len(plan.NodeUpdate, 1) // alloc in DC3 gets destructive update - require.Len(plan.NodeUpdate[nodes[2].ID], 1) - require.Equal(allocs[2].ID, plan.NodeUpdate[nodes[2].ID][0].ID) + must.MapLen(t, 1, plan.NodeUpdate) // alloc in DC3 gets destructive update + must.SliceLen(t, 1, plan.NodeUpdate[nodes[2].ID]) + must.Eq(t, allocs[2].ID, plan.NodeUpdate[nodes[2].ID][0].ID) - require.Len(plan.NodeAllocation, 2) // only 2 eligible nodes + must.MapLen(t, 2, plan.NodeAllocation) // only 2 eligible nodes placed := map[string]*structs.Allocation{} for node, placedAllocs := range plan.NodeAllocation { - require.True( + must.True(t, slices.Contains([]string{nodes[0].ID, nodes[1].ID}, node), - "allocation placed on ineligible node", + must.Sprint("allocation placed on ineligible node"), ) for _, alloc := range placedAllocs { placed[alloc.ID] = alloc } } - require.Len(placed, 4) - require.Equal(nodes[0].ID, placed[allocs[0].ID].NodeID, "alloc should not have moved") - require.Equal(nodes[1].ID, placed[allocs[1].ID].NodeID, "alloc should not have moved") + must.MapLen(t, 4, placed) + must.Eq(t, nodes[0].ID, placed[allocs[0].ID].NodeID, must.Sprint("alloc should not have moved")) + must.Eq(t, nodes[1].ID, placed[allocs[1].ID].NodeID, must.Sprint("alloc should not have moved")) } // Have a single node and submit a job. 
Increment the count such that all fit @@ -2468,13 +2458,13 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { // Create one node node := mock.Node() node.NodeResources.Cpu.CpuShares = 1000 - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a fake job with one allocation job := mock.Job() job.TaskGroups[0].Tasks[0].Resources.CPU = 256 job2 := job.Copy() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation alloc := mock.Alloc() @@ -2484,11 +2474,11 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { alloc.Name = "my-job.web[0]" alloc.AllocatedResources.Tasks["web"].Cpu.CpuShares = 256 allocs = append(allocs, alloc) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job to count 3 job2.TaskGroups[0].Count = 3 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -2499,7 +2489,7 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2543,7 +2533,7 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed out, _ = structs.FilterTerminalAllocs(out) @@ -2564,12 +2554,12 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Generate a fake job with allocations job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -2580,7 +2570,7 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { alloc.Name = structs.AllocName(alloc.JobID, alloc.TaskGroup, uint(i)) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Add a few terminal status allocations, these should be ignored var terminal []*structs.Allocation @@ -2593,13 +2583,13 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop terminal = append(terminal, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), 
terminal)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) // Update the job to be count zero job2 := mock.Job() job2.ID = job.ID job2.TaskGroups[0].Count = 0 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -2610,7 +2600,7 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2645,7 +2635,7 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed out, _ = structs.FilterTerminalAllocs(out) @@ -2666,12 +2656,12 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Generate a fake job with allocations job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -2682,7 +2672,7 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job job2 := mock.Job() @@ -2697,7 +2687,7 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -2708,7 +2698,7 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2772,7 +2762,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { // Create a node and clear the reserved resources node := mock.Node() node.ReservedResources = nil - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a resource ask that is the same as the resources available on the // node @@ -2800,7 +2790,7 @@ func 
TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) {
 	job := mock.Job()
 	job.TaskGroups[0].Count = 1
 	job.TaskGroups[0].Tasks[0].Resources = request
-	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job))
+	must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job))
 	alloc := mock.Alloc()
 	alloc.AllocatedResources = allocated
@@ -2808,7 +2798,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) {
 	alloc.JobID = job.ID
 	alloc.NodeID = node.ID
 	alloc.Name = "my-job.web[0]"
-	require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc}))
+	must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc}))
 	// Update the job to place more versions of the task group, drop the count
 	// and force destructive updates
@@ -2824,7 +2814,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) {
 	// Update the task, such that it cannot be done in-place
 	job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
-	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2))
+	must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2))
 	eval := &structs.Evaluation{
 		Namespace: structs.DefaultNamespace,
@@ -2834,7 +2824,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) {
 		JobID: job.ID,
 		Status: structs.EvalStatusPending,
 	}
-	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
+	must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewServiceScheduler, eval)
@@ -2896,12 +2886,12 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
 		nodes = append(nodes, node)
-		require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
+		must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
 	}
 	// Generate a fake job with allocations
 	job := mock.Job()
-	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job))
+	must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job))
 	var allocs []*structs.Allocation
 	for i := 0; i < 10; i++ {
@@ -2912,7 +2902,7 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) {
 		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
 		allocs = append(allocs, alloc)
 	}
-	require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
+	must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
 	// Update the job
 	job2 := mock.Job()
@@ -2928,7 +2918,7 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) {
 	// Update the task, such that it cannot be done in-place
 	job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
-	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2))
+	must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2))
 	// Create a mock evaluation
 	eval := &structs.Evaluation{
@@ -2939,7 +2929,7 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) {
 		JobID: job.ID,
 		Status: structs.EvalStatusPending,
 	}
-	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
+	must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewServiceScheduler, eval)
@@ -2990,26 +2980,24 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) {
 	// Ensure local state was not altered in scheduler
 	staleDState, ok := plan.Deployment.TaskGroups[job.TaskGroups[0].Name]
-	require.True(t, ok)
+	must.True(t, ok)
-	require.Equal(t, 0, len(staleDState.PlacedCanaries))
+	must.Eq(t, 0, len(staleDState.PlacedCanaries))
 	ws := memdb.NewWatchSet()
 	// Grab the latest state
 	deploy, err := h.State.DeploymentByID(ws, plan.Deployment.ID)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	state, ok := deploy.TaskGroups[job.TaskGroups[0].Name]
-	require.True(t, ok)
+	must.True(t, ok)
-	require.Equal(t, 10, state.DesiredTotal)
-	require.Equal(t, state.DesiredCanaries, desiredUpdates)
+	must.Eq(t, 10, state.DesiredTotal)
+	must.Eq(t, desiredUpdates, state.DesiredCanaries)
 	// Assert the canaries were added to the placed list
-	if len(state.PlacedCanaries) != desiredUpdates {
-		assert.Fail(t, "expected PlacedCanaries to equal desiredUpdates", state)
-	}
+	must.Eq(t, desiredUpdates, len(state.PlacedCanaries))
 }
 func TestServiceSched_JobModify_InPlace(t *testing.T) {
@@ -3022,15 +3010,15 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
 		nodes = append(nodes, node)
-		require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
+		must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
 	}
 	// Generate a fake job with allocations and create an older deployment
 	job := mock.Job()
 	d := mock.Deployment()
 	d.JobID = job.ID
-	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job))
-	require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d))
+	must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job))
+	must.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d))
 	taskName := job.TaskGroups[0].Tasks[0].Name
@@ -3059,7 +3047,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) {
 		alloc.AllocatedResources.Shared = asr
 		allocs = append(allocs, alloc)
 	}
-	require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
+	must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
 	// Update the job
 	job2 := mock.Job()
@@ -3071,7 +3059,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) {
 		MinHealthyTime: 10 * time.Second,
 		HealthyDeadline: 10 * time.Minute,
 	}
-	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2))
+	must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2))
 	// Create a mock evaluation
 	eval := &structs.Evaluation{
@@ -3082,7 +3070,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) {
 		JobID: job.ID,
 		Status: structs.EvalStatusPending,
 	}
-	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
+	must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewServiceScheduler, eval)
@@ -3122,25 +3110,21 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) {
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	// Ensure all allocations placed
-	if len(out) != 10 {
-		t.Fatalf("bad: %#v", out)
-	}
+	must.Len(t, 10, out)
 	h.AssertEvalStatus(t, 
structs.EvalStatusComplete) // Verify the allocated networks and devices did not change rp := structs.Port{Label: "admin", Value: 5000} for _, alloc := range out { // Verify Shared Allocared Resources Persisted - require.Equal(t, alloc.AllocatedResources.Shared.Ports, asr.Ports) - require.Equal(t, alloc.AllocatedResources.Shared.Networks, asr.Networks) + must.Eq(t, asr.Ports, alloc.AllocatedResources.Shared.Ports) + must.Eq(t, asr.Networks, alloc.AllocatedResources.Shared.Networks) for _, resources := range alloc.AllocatedResources.Tasks { - if resources.Networks[0].ReservedPorts[0] != rp { - t.Fatalf("bad: %#v", alloc) - } + must.Eq(t, rp, resources.Networks[0].ReservedPorts[0]) if len(resources.Devices) == 0 || reflect.DeepEqual(resources.Devices[0], adr) { t.Fatalf("bad devices has changed: %#v", alloc) } @@ -3173,12 +3157,12 @@ func TestServiceSched_JobModify_InPlace08(t *testing.T) { // Create node node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a fake job with 0.8 allocations job := mock.Job() job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create 0.8 alloc alloc := mock.Alloc() @@ -3186,13 +3170,13 @@ func TestServiceSched_JobModify_InPlace08(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.AllocatedResources = nil // 0.8 didn't have this - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Update the job inplace job2 := job.Copy() job2.TaskGroups[0].Tasks[0].Services[0].Tags[0] = "newtag" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -3203,14 +3187,14 @@ func TestServiceSched_JobModify_InPlace08(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.SliceLen(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan did not evict any allocs @@ -3218,31 +3202,31 @@ func TestServiceSched_JobModify_InPlace08(t *testing.T) { for _, updateList := range plan.NodeUpdate { update = append(update, updateList...) } - require.Zero(t, update) + must.SliceLen(t, 0, update) // Ensure the plan updated the existing alloc var planned []*structs.Allocation for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) 
} - require.Len(t, planned, 1) + must.SliceLen(t, 1, planned) for _, p := range planned { - require.Equal(t, job2, p.Job) + must.Eq(t, job2, p.Job) } // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed - require.Len(t, out, 1) + must.SliceLen(t, 1, out) h.AssertEvalStatus(t, structs.EvalStatusComplete) newAlloc := out[0] // Verify AllocatedResources was set - require.NotNil(t, newAlloc.AllocatedResources) + must.NotNil(t, newAlloc.AllocatedResources) } func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { @@ -3256,7 +3240,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { node := mock.Node() node.Meta["rack"] = fmt.Sprintf("rack%d", i) nodes = append(nodes, node) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create a job that uses distinct property and has count higher than what is @@ -3268,7 +3252,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { Operand: structs.ConstraintDistinctProperty, LTarget: "${meta.rack}", }) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) oldJob := job.Copy() oldJob.JobModifyIndex -= 1 @@ -3284,7 +3268,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -3295,7 +3279,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -3337,7 +3321,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed if len(out) != 10 { @@ -3363,14 +3347,13 @@ func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) { ci.Parallel(t) h := NewHarness(t) - require := require.New(t) // Create some nodes var nodes []*structs.Node for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - require.NoError(h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Generate a fake job with allocations and an update policy. 
@@ -3386,7 +3369,7 @@ func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -3406,7 +3389,7 @@ func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) { failedAllocID := failedAlloc.ID successAllocID := allocs[0].ID - require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create and process a mock evaluation eval := &structs.Evaluation{ @@ -3417,43 +3400,43 @@ func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) - require.NoError(h.Process(NewServiceScheduler, eval)) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.Process(NewServiceScheduler, eval)) // Ensure we have one plan - require.Equal(1, len(h.Plans)) + must.Eq(t, 1, len(h.Plans)) // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(err) + must.NoError(t, err) // Verify that one new allocation got created with its restart tracker info - require.Equal(3, len(out)) + must.Eq(t, 3, len(out)) var newAlloc *structs.Allocation for _, alloc := range out { if alloc.ID != successAllocID && alloc.ID != failedAllocID { newAlloc = alloc } } - require.Equal(failedAllocID, newAlloc.PreviousAllocation) - require.Equal(1, len(newAlloc.RescheduleTracker.Events)) - require.Equal(failedAllocID, newAlloc.RescheduleTracker.Events[0].PrevAllocID) + must.Eq(t, failedAllocID, newAlloc.PreviousAllocation) + must.Eq(t, 1, len(newAlloc.RescheduleTracker.Events)) + must.Eq(t, failedAllocID, newAlloc.RescheduleTracker.Events[0].PrevAllocID) // Verify that the node-reschedule penalty was applied to the new alloc for _, scoreMeta := range newAlloc.Metrics.ScoreMetaData { if scoreMeta.NodeID == failedAlloc.NodeID { - require.Equal(-1.0, scoreMeta.Scores["node-reschedule-penalty"], + must.Eq(t, -1.0, scoreMeta.Scores["node-reschedule-penalty"], must.Sprintf( "eval to replace failed alloc missing node-reshedule-penalty: %v", scoreMeta.Scores, - ) + )) } } // Update the job, such that it cannot be done in-place job2 := job.Copy() job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create and process a mock evaluation eval = &structs.Evaluation{ @@ -3464,25 +3447,25 @@ func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) - require.NoError(h.Process(NewServiceScheduler, eval)) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.Process(NewServiceScheduler, eval)) // Lookup the new allocations by JobID out, err = h.State.AllocsByJob(ws, job.Namespace, 
job2.ID, false) - require.NoError(err) + must.NoError(t, err) out, _ = structs.FilterTerminalAllocs(out) - require.Equal(2, len(out)) + must.Eq(t, 2, len(out)) // No new allocs have node-reschedule-penalty for _, alloc := range out { - require.Nil(alloc.RescheduleTracker) - require.NotNil(alloc.Metrics) + must.Nil(t, alloc.RescheduleTracker) + must.NotNil(t, alloc.Metrics) for _, scoreMeta := range alloc.Metrics.ScoreMetaData { if scoreMeta.NodeID != failedAlloc.NodeID { - require.Equal(0.0, scoreMeta.Scores["node-reschedule-penalty"], + must.Eq(t, 0.0, scoreMeta.Scores["node-reschedule-penalty"], must.Sprintf( "eval for updated job should not include node-reshedule-penalty: %v", scoreMeta.Scores, - ) + )) } } } @@ -3506,7 +3489,7 @@ func TestServiceSched_JobDeregister_Purged(t *testing.T) { for _, alloc := range allocs { h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -3517,7 +3500,7 @@ func TestServiceSched_JobDeregister_Purged(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -3539,7 +3522,7 @@ func TestServiceSched_JobDeregister_Purged(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure that the job field on the allocation is still populated for _, alloc := range out { @@ -3561,12 +3544,11 @@ func TestServiceSched_JobDeregister_Stopped(t *testing.T) { ci.Parallel(t) h := NewHarness(t) - require := require.New(t) // Generate a fake job with allocations job := mock.Job() job.Stop = true - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -3575,14 +3557,14 @@ func TestServiceSched_JobDeregister_Stopped(t *testing.T) { alloc.JobID = job.ID allocs = append(allocs, alloc) } - require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a summary where the queued allocs are set as we want to assert // they get zeroed out. 
summary := mock.JobSummary(job.ID) web := summary.Summary["web"] web.Queued = 2 - require.NoError(h.State.UpsertJobSummary(h.NextIndex(), summary)) + must.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), summary)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -3593,39 +3575,39 @@ func TestServiceSched_JobDeregister_Stopped(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation - require.NoError(h.Process(NewServiceScheduler, eval)) + must.NoError(t, h.Process(NewServiceScheduler, eval)) // Ensure a single plan - require.Len(h.Plans, 1) + must.SliceLen(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted all nodes - require.Len(plan.NodeUpdate["12345678-abcd-efab-cdef-123456789abc"], len(allocs)) + must.SliceLen(t, len(allocs), plan.NodeUpdate["12345678-abcd-efab-cdef-123456789abc"]) // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(err) + must.NoError(t, err) // Ensure that the job field on the allocation is still populated for _, alloc := range out { - require.NotNil(alloc.Job) + must.NotNil(t, alloc.Job) } // Ensure no remaining allocations out, _ = structs.FilterTerminalAllocs(out) - require.Empty(out) + must.SliceLen(t, 0, out) // Assert the job summary is cleared out sout, err := h.State.JobSummaryByID(ws, job.Namespace, job.ID) - require.NoError(err) - require.NotNil(sout) - require.Contains(sout.Summary, "web") + must.NoError(t, err) + must.NotNil(t, sout) + must.MapContainsKey(t, sout.Summary, "web") webOut := sout.Summary["web"] - require.Zero(webOut.Queued) + must.Zero(t, webOut.Queued) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -3927,11 +3909,11 @@ func TestServiceSched_NodeUpdate(t *testing.T) { // Register a node node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a fake job with allocations and an update policy. 
job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -3942,14 +3924,14 @@ func TestServiceSched_NodeUpdate(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Mark some allocs as running ws := memdb.NewWatchSet() for i := 0; i < 4; i++ { out, _ := h.State.AllocByID(ws, allocs[i].ID) out.ClientStatus = structs.AllocClientStatusRunning - require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{out})) + must.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{out})) } // Create a mock evaluation which won't trigger any new placements @@ -3962,7 +3944,7 @@ func TestServiceSched_NodeUpdate(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -3983,17 +3965,17 @@ func TestServiceSched_NodeDrain(t *testing.T) { // Register a draining node node := mock.DrainNode() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Generate a fake job with allocations and an update policy. 
job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -4005,7 +3987,7 @@ func TestServiceSched_NodeDrain(t *testing.T) { alloc.DesiredTransition.Migrate = pointer.Of(true) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -4017,7 +3999,7 @@ func TestServiceSched_NodeDrain(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -4048,7 +4030,7 @@ func TestServiceSched_NodeDrain(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed out, _ = structs.FilterTerminalAllocs(out) @@ -4067,11 +4049,11 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { // Register a draining node node := mock.DrainNode() node.Status = structs.NodeStatusDown - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a fake job with allocations job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -4082,7 +4064,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Set the desired state of the allocs to stop var stop []*structs.Allocation @@ -4092,7 +4074,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { newAlloc.DesiredTransition.Migrate = pointer.Of(true) stop = append(stop, newAlloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), stop)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), stop)) // Mark some of the allocations as running var running []*structs.Allocation @@ -4101,7 +4083,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { newAlloc.ClientStatus = structs.AllocClientStatusRunning running = append(running, newAlloc) } - require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), running)) + must.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), running)) // Mark some of the allocations as complete var complete []*structs.Allocation @@ -4120,7 +4102,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { newAlloc.ClientStatus = structs.AllocClientStatusComplete complete = append(complete, newAlloc) } - require.NoError(t, 
h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), complete)) + must.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), complete)) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -4133,7 +4115,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -4291,12 +4273,12 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { // Register a draining node node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a fake job with allocations and an update policy. job := mock.Job() job.TaskGroups[0].Count = 2 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -4308,10 +4290,10 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { alloc.DesiredTransition.Migrate = pointer.Of(true) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) node.DrainStrategy = mock.DrainNode().DrainStrategy - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -4323,7 +4305,7 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -4346,12 +4328,12 @@ func TestServiceSched_RetryLimit(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create a job job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -4362,7 +4344,7 @@ func TestServiceSched_RetryLimit(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -4378,7 +4360,7 @@ func TestServiceSched_RetryLimit(t *testing.T) { // Lookup the allocations by JobID ws := 
memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure no allocations placed if len(out) != 0 { @@ -4399,7 +4381,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Generate a fake job with allocations and an update policy. @@ -4415,7 +4397,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -4434,7 +4416,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { failedAllocID := allocs[1].ID successAllocID := allocs[0].ID - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -4445,7 +4427,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -4461,25 +4443,24 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Verify that one new allocation got created with its restart tracker info - assert := assert.New(t) - assert.Equal(3, len(out)) + must.Eq(t, 3, len(out)) var newAlloc *structs.Allocation for _, alloc := range out { if alloc.ID != successAllocID && alloc.ID != failedAllocID { newAlloc = alloc } } - assert.Equal(failedAllocID, newAlloc.PreviousAllocation) - assert.Equal(1, len(newAlloc.RescheduleTracker.Events)) - assert.Equal(failedAllocID, newAlloc.RescheduleTracker.Events[0].PrevAllocID) + must.Eq(t, failedAllocID, newAlloc.PreviousAllocation) + must.Eq(t, 1, len(newAlloc.RescheduleTracker.Events)) + must.Eq(t, failedAllocID, newAlloc.RescheduleTracker.Events[0].PrevAllocID) // Mark this alloc as failed again, should not get rescheduled newAlloc.ClientStatus = structs.AllocClientStatusFailed - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{newAlloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{newAlloc})) // Create another mock evaluation eval = &structs.Evaluation{ @@ -4490,15 +4471,15 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err = h.Process(NewServiceScheduler, eval) - assert.Nil(err) + 
must.NoError(t, err) // Verify no new allocs were created this time out, err = h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) - assert.Equal(3, len(out)) + must.NoError(t, err) + must.Eq(t, 3, len(out)) } @@ -4507,13 +4488,12 @@ func TestServiceSched_Reschedule_Later(t *testing.T) { ci.Parallel(t) h := NewHarness(t) - require := require.New(t) // Create some nodes var nodes []*structs.Node for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - require.NoError(h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Generate a fake job with allocations and an update policy. @@ -4530,7 +4510,7 @@ func TestServiceSched_Reschedule_Later(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -4548,7 +4528,7 @@ func TestServiceSched_Reschedule_Later(t *testing.T) { FinishedAt: now}} failedAllocID := allocs[1].ID - require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -4559,7 +4539,7 @@ func TestServiceSched_Reschedule_Later(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -4575,22 +4555,22 @@ func TestServiceSched_Reschedule_Later(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(err) + must.NoError(t, err) // Verify no new allocs were created - require.Equal(2, len(out)) + must.Eq(t, 2, len(out)) // Verify follow up eval was created for the failed alloc alloc, err := h.State.AllocByID(ws, failedAllocID) - require.Nil(err) - require.NotEmpty(alloc.FollowupEvalID) + must.NoError(t, err) + must.NotEq(t, "", alloc.FollowupEvalID) // Ensure there is a follow up eval. 
if len(h.CreateEvals) != 1 || h.CreateEvals[0].Status != structs.EvalStatusPending { t.Fatalf("bad: %#v", h.CreateEvals) } followupEval := h.CreateEvals[0] - require.Equal(now.Add(delayDuration), followupEval.WaitUntil) + must.Eq(t, now.Add(delayDuration), followupEval.WaitUntil) } func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { @@ -4603,7 +4583,7 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } maxRestartAttempts := 3 @@ -4619,7 +4599,7 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -4637,7 +4617,7 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { StartedAt: now.Add(-1 * time.Hour), FinishedAt: now.Add(-10 * time.Second)}} - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -4648,7 +4628,7 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) expectedNumAllocs := 3 expectedNumReschedTrackers := 1 @@ -4656,11 +4636,10 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { failedAllocId := allocs[1].ID failedNodeID := allocs[1].NodeID - assert := assert.New(t) for i := 0; i < maxRestartAttempts; i++ { // Process the evaluation err := h.Process(NewServiceScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure multiple plans if len(h.Plans) == 0 { @@ -4670,10 +4649,10 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Verify that a new allocation got created with its restart tracker info - assert.Equal(expectedNumAllocs, len(out)) + must.Eq(t, expectedNumAllocs, len(out)) // Find the new alloc with ClientStatusPending var pendingAllocs []*structs.Allocation @@ -4687,17 +4666,17 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { prevFailedAlloc = alloc } } - assert.Equal(1, len(pendingAllocs)) + must.Eq(t, 1, len(pendingAllocs)) newAlloc := pendingAllocs[0] - assert.Equal(expectedNumReschedTrackers, len(newAlloc.RescheduleTracker.Events)) + must.Eq(t, expectedNumReschedTrackers, len(newAlloc.RescheduleTracker.Events)) // Verify the previous NodeID in the most recent reschedule event reschedEvents := newAlloc.RescheduleTracker.Events - assert.Equal(failedAllocId, reschedEvents[len(reschedEvents)-1].PrevAllocID) - assert.Equal(failedNodeID, reschedEvents[len(reschedEvents)-1].PrevNodeID) + must.Eq(t, failedAllocId, reschedEvents[len(reschedEvents)-1].PrevAllocID) + must.Eq(t, failedNodeID, 
reschedEvents[len(reschedEvents)-1].PrevNodeID) // Verify that the next alloc of the failed alloc is the newly rescheduled alloc - assert.Equal(newAlloc.ID, prevFailedAlloc.NextAllocation) + must.Eq(t, newAlloc.ID, prevFailedAlloc.NextAllocation) // Mark this alloc as failed again newAlloc.ClientStatus = structs.AllocClientStatusFailed @@ -4708,7 +4687,7 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { failedAllocId = newAlloc.ID failedNodeID = newAlloc.NodeID - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{newAlloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{newAlloc})) // Create another mock evaluation eval = &structs.Evaluation{ @@ -4719,20 +4698,20 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) expectedNumAllocs += 1 expectedNumReschedTrackers += 1 } // Process last eval again, should not reschedule err := h.Process(NewServiceScheduler, eval) - assert.Nil(err) + must.NoError(t, err) // Verify no new allocs were created because restart attempts were exhausted ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) - assert.Equal(5, len(out)) // 2 original, plus 3 reschedule attempts + must.NoError(t, err) + must.Eq(t, 5, len(out)) // 2 original, plus 3 reschedule attempts } func TestServiceSched_BlockedReschedule(t *testing.T) { @@ -4942,7 +4921,7 @@ func TestServiceSched_Reschedule_PruneEvents(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Generate a fake job with allocations and an update policy. 
@@ -4954,7 +4933,7 @@ func TestServiceSched_Reschedule_PruneEvents(t *testing.T) { Delay: 5 * time.Second, Unlimited: true, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -5011,7 +4990,7 @@ func TestServiceSched_Reschedule_PruneEvents(t *testing.T) { failedAllocID := allocs[1].ID successAllocID := allocs[0].ID - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -5022,7 +5001,7 @@ func TestServiceSched_Reschedule_PruneEvents(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -5038,11 +5017,10 @@ func TestServiceSched_Reschedule_PruneEvents(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Verify that one new allocation got created with its restart tracker info - assert := assert.New(t) - assert.Equal(3, len(out)) + must.Eq(t, 3, len(out)) var newAlloc *structs.Allocation for _, alloc := range out { if alloc.ID != successAllocID && alloc.ID != failedAllocID { @@ -5050,16 +5028,16 @@ func TestServiceSched_Reschedule_PruneEvents(t *testing.T) { } } - assert.Equal(failedAllocID, newAlloc.PreviousAllocation) + must.Eq(t, failedAllocID, newAlloc.PreviousAllocation) // Verify that the new alloc copied the last 5 reschedule attempts - assert.Equal(6, len(newAlloc.RescheduleTracker.Events)) - assert.Equal(expectedFirstRescheduleEvent, newAlloc.RescheduleTracker.Events[0]) + must.Eq(t, 6, len(newAlloc.RescheduleTracker.Events)) + must.Eq(t, expectedFirstRescheduleEvent, newAlloc.RescheduleTracker.Events[0]) mostRecentRescheduleEvent := newAlloc.RescheduleTracker.Events[5] // Verify that the failed alloc ID is in the most recent reschedule event - assert.Equal(failedAllocID, mostRecentRescheduleEvent.PrevAllocID) + must.Eq(t, failedAllocID, mostRecentRescheduleEvent.PrevAllocID) // Verify that the delay value was captured correctly - assert.Equal(expectedDelay, mostRecentRescheduleEvent.Delay) + must.Eq(t, expectedDelay, mostRecentRescheduleEvent.Delay) } @@ -5071,13 +5049,12 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { for _, failedDeployment := range []bool{false, true} { t.Run(fmt.Sprintf("Failed Deployment: %v", failedDeployment), func(t *testing.T) { h := NewHarness(t) - require := require.New(t) // Create some nodes var nodes []*structs.Node for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - require.NoError(h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Generate a fake job with allocations and a reschedule policy. 
@@ -5088,7 +5065,7 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { Interval: 15 * time.Minute, } jobIndex := h.NextIndex() - require.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) deployment := mock.Deployment() deployment.JobID = job.ID @@ -5098,7 +5075,7 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { deployment.Status = structs.DeploymentStatusFailed } - require.Nil(h.State.UpsertDeployment(h.NextIndex(), deployment)) + must.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -5117,7 +5094,7 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { FinishedAt: time.Now().Add(-10 * time.Hour)}} allocs[1].DesiredTransition.Reschedule = pointer.Of(true) - require.Nil(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -5128,16 +5105,16 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.Nil(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation - require.Nil(h.Process(NewServiceScheduler, eval)) + must.NoError(t, h.Process(NewServiceScheduler, eval)) if failedDeployment { // Verify no plan created - require.Len(h.Plans, 0) + must.Len(t, 0, h.Plans) } else { - require.Len(h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan allocated @@ -5160,13 +5137,13 @@ func TestBatchSched_Run_CompleteAlloc(t *testing.T) { // Create a node node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a job job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a complete alloc alloc := mock.Alloc() @@ -5175,7 +5152,7 @@ func TestBatchSched_Run_CompleteAlloc(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" alloc.ClientStatus = structs.AllocClientStatusComplete - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5186,7 +5163,7 @@ func TestBatchSched_Run_CompleteAlloc(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -5202,7 +5179,7 @@ func TestBatchSched_Run_CompleteAlloc(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + 
must.NoError(t, err) // Ensure no allocations placed if len(out) != 1 { @@ -5219,13 +5196,13 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) { // Create a node node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a job job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) tgName := job.TaskGroups[0].Name now := time.Now() @@ -5240,7 +5217,7 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) { alloc.TaskStates = map[string]*structs.TaskState{tgName: {State: "dead", StartedAt: now.Add(-1 * time.Hour), FinishedAt: now.Add(-10 * time.Second)}} - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5251,7 +5228,7 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -5267,7 +5244,7 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure a replacement alloc was placed. 
if len(out) != 2 { @@ -5291,14 +5268,14 @@ func TestBatchSched_Run_LostAlloc(t *testing.T) { // Create a node node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a job job := mock.Job() job.ID = "my-job" job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 3 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Desired = 3 // Mark one as lost and then schedule @@ -5324,7 +5301,7 @@ func TestBatchSched_Run_LostAlloc(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop alloc.ClientStatus = structs.AllocClientStatusComplete allocs = append(allocs, alloc) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5335,7 +5312,7 @@ func TestBatchSched_Run_LostAlloc(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -5351,7 +5328,7 @@ func TestBatchSched_Run_LostAlloc(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure a replacement alloc was placed. 
if len(out) != 4 { @@ -5368,7 +5345,7 @@ func TestBatchSched_Run_LostAlloc(t *testing.T) { for _, alloc := range out { actual[alloc.Name] += 1 } - require.Equal(t, actual, expected) + must.Eq(t, expected, actual) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -5379,13 +5356,13 @@ func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) { h := NewHarness(t) node := mock.DrainNode() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a job job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) tgName := job.TaskGroups[0].Name now := time.Now() @@ -5400,7 +5377,7 @@ func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) { alloc.TaskStates = map[string]*structs.TaskState{tgName: {State: "dead", StartedAt: now.Add(-1 * time.Hour), FinishedAt: now.Add(-10 * time.Second)}} - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5411,7 +5388,7 @@ func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -5436,14 +5413,14 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) { // alloc and a fresh undrained one node := mock.DrainNode() node2 := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) // Create a job job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a successful alloc alloc := mock.Alloc() @@ -5463,7 +5440,7 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) { }, }, } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to rerun the job eval := &structs.Evaluation{ @@ -5474,7 +5451,7 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ 
-5490,7 +5467,7 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure no replacement alloc was placed. if len(out) != 1 { @@ -5512,13 +5489,13 @@ func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Generate a fake job with allocations job := mock.Job() job.Type = structs.JobTypeBatch - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -5530,7 +5507,7 @@ func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to trigger the job eval := &structs.Evaluation{ @@ -5541,7 +5518,7 @@ func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -5566,13 +5543,13 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Generate a fake job with allocations job := mock.Job() job.Type = structs.JobTypeBatch - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -5584,7 +5561,7 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job job2 := mock.Job() @@ -5592,7 +5569,7 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { job2.Type = structs.JobTypeBatch job2.Version++ job2.TaskGroups[0].Tasks[0].Env = map[string]string{"foo": "bar"} - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) allocs = nil for i := 0; i < 10; i++ { @@ -5615,7 +5592,7 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { } allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + 
must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -5626,7 +5603,7 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -5651,14 +5628,14 @@ func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) { // alloc and a fresh undrained one node := mock.DrainNode() node2 := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) // Create a job job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a running alloc alloc := mock.Alloc() @@ -5667,13 +5644,13 @@ func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" alloc.ClientStatus = structs.AllocClientStatusRunning - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create an update job job2 := job.Copy() job2.TaskGroups[0].Tasks[0].Env = map[string]string{"foo": "bar"} job2.Version++ - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5685,7 +5662,7 @@ func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -5724,14 +5701,14 @@ func TestBatchSched_NodeDrain_Complete(t *testing.T) { // alloc and a fresh undrained one node := mock.DrainNode() node2 := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) // Create a job job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a complete alloc alloc := mock.Alloc() @@ -5750,7 +5727,7 @@ func TestBatchSched_NodeDrain_Complete(t *testing.T) { }, }, } - 
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5762,7 +5739,7 @@ func TestBatchSched_NodeDrain_Complete(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -5788,13 +5765,13 @@ func TestBatchSched_ScaleDown_SameName(t *testing.T) { // Create a node node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a job job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) scoreMetric := &structs.AllocMetric{ NodesEvaluated: 10, @@ -5819,12 +5796,12 @@ func TestBatchSched_ScaleDown_SameName(t *testing.T) { alloc.Metrics = scoreMetric allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job's modify index to force an inplace upgrade updatedJob := job.Copy() updatedJob.JobModifyIndex = job.JobModifyIndex + 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, updatedJob)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, updatedJob)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5836,7 +5813,7 @@ func TestBatchSched_ScaleDown_SameName(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -5851,14 +5828,13 @@ func TestBatchSched_ScaleDown_SameName(t *testing.T) { plan := h.Plans[0] - require := require.New(t) // Ensure the plan evicted 4 of the 5 - require.Equal(4, len(plan.NodeUpdate[node.ID])) + must.Eq(t, 4, len(plan.NodeUpdate[node.ID])) // Ensure that the scheduler did not overwrite the original score metrics for the i for _, inPlaceAllocs := range plan.NodeAllocation { for _, alloc := range inPlaceAllocs { - require.Equal(scoreMetric, alloc.Metrics) + must.Eq(t, scoreMetric, alloc.Metrics) } } h.AssertEvalStatus(t, structs.EvalStatusComplete) @@ -5945,12 +5921,12 @@ func TestGenericSched_AllocFit_Lifecycle(t *testing.T) { node := mock.Node() node.NodeResources.Processors = processorResources node.NodeResources.Cpu = legacyCpuResources - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a job with sidecar & init tasks job := mock.VariableLifecycleJob(testCase.TaskResources, testCase.MainTaskCount, testCase.InitTaskCount, 
testCase.SideTaskCount) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5961,26 +5937,26 @@ func TestGenericSched_AllocFit_Lifecycle(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) allocs := 0 if testCase.ShouldPlaceAlloc { allocs = 1 } // Ensure no plan as it should be a no-op - require.Len(t, h.Plans, allocs) + must.Len(t, allocs, h.Plans) // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure no allocations placed - require.Len(t, out, allocs) + must.Len(t, allocs, out) h.AssertEvalStatus(t, structs.EvalStatusComplete) }) @@ -5995,7 +5971,7 @@ func TestGenericSched_AllocFit_MemoryOversubscription(t *testing.T) { node.NodeResources.Cpu.CpuShares = 10000 node.NodeResources.Memory.MemoryMB = 1224 node.ReservedResources.Memory.MemoryMB = 60 - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) job := mock.Job() job.TaskGroups[0].Count = 10 @@ -6003,7 +5979,7 @@ func TestGenericSched_AllocFit_MemoryOversubscription(t *testing.T) { job.TaskGroups[0].Tasks[0].Resources.MemoryMB = 200 job.TaskGroups[0].Tasks[0].Resources.MemoryMaxMB = 500 job.TaskGroups[0].Tasks[0].Resources.DiskMB = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -6014,22 +5990,22 @@ func TestGenericSched_AllocFit_MemoryOversubscription(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // expectedAllocs should be floor((nodeResources.MemoryMB-reservedResources.MemoryMB) / job.MemoryMB) expectedAllocs := 5 - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) - require.Len(t, out, expectedAllocs) + must.Len(t, expectedAllocs, out) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -6042,12 +6018,12 @@ func TestGenericSched_ChainedAlloc(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create a job job := mock.Job() - require.NoError(t, 
h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -6058,7 +6034,7 @@ func TestGenericSched_ChainedAlloc(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation if err := h.Process(NewServiceScheduler, eval); err != nil { t.Fatalf("err: %v", err) @@ -6078,7 +6054,7 @@ func TestGenericSched_ChainedAlloc(t *testing.T) { job1.ID = job.ID job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar" job1.TaskGroups[0].Count = 12 - require.NoError(t, h1.State.UpsertJob(structs.MsgTypeTestSetup, h1.NextIndex(), nil, job1)) + must.NoError(t, h1.State.UpsertJob(structs.MsgTypeTestSetup, h1.NextIndex(), nil, job1)) // Create a mock evaluation to update the job eval1 := &structs.Evaluation{ @@ -6089,7 +6065,7 @@ func TestGenericSched_ChainedAlloc(t *testing.T) { JobID: job1.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval1})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval1})) // Process the evaluation if err := h1.Process(NewServiceScheduler, eval1); err != nil { @@ -6190,14 +6166,14 @@ func TestServiceSched_CancelDeployment_Stopped(t *testing.T) { job.JobModifyIndex = job.CreateIndex + 1 job.ModifyIndex = job.CreateIndex + 1 job.Stop = true - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a deployment d := mock.Deployment() d.JobID = job.ID d.JobCreateIndex = job.CreateIndex d.JobModifyIndex = job.JobModifyIndex - 1 - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d)) + must.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -6209,7 +6185,7 @@ func TestServiceSched_CancelDeployment_Stopped(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -6226,7 +6202,7 @@ func TestServiceSched_CancelDeployment_Stopped(t *testing.T) { // Ensure the plan cancelled the existing deployment ws := memdb.NewWatchSet() out, err := h.State.LatestDeploymentByJobID(ws, job.Namespace, job.ID) - require.NoError(t, err) + must.NoError(t, err) if out == nil { t.Fatalf("No deployment for job") @@ -6263,15 +6239,15 @@ func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) { // Generate a fake job job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a deployment for an old version of the job d := mock.Deployment() d.JobID = job.ID - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d)) + must.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d)) // Upsert 
again to bump job version - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to kick the job eval := &structs.Evaluation{ @@ -6283,7 +6259,7 @@ func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -6300,7 +6276,7 @@ func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) { // Ensure the plan cancelled the existing deployment ws := memdb.NewWatchSet() out, err := h.State.LatestDeploymentByJobID(ws, job.Namespace, job.ID) - require.NoError(t, err) + must.NoError(t, err) if out == nil { t.Fatalf("No deployment for job") @@ -6555,11 +6531,10 @@ func Test_updateRescheduleTracker(t *testing.T) { for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { - require := require.New(t) prevAlloc.RescheduleTracker = &structs.RescheduleTracker{Events: tc.prevAllocEvents} prevAlloc.Job.LookupTaskGroup(prevAlloc.TaskGroup).ReschedulePolicy = tc.reschedPolicy updateRescheduleTracker(alloc, prevAlloc, tc.reschedTime) - require.Equal(tc.expectedRescheduleEvents, alloc.RescheduleTracker.Events) + must.Eq(t, tc.expectedRescheduleEvents, alloc.RescheduleTracker.Events) }) } @@ -6568,7 +6543,6 @@ func Test_updateRescheduleTracker(t *testing.T) { func TestServiceSched_Preemption(t *testing.T) { ci.Parallel(t) - require := require.New(t) h := NewHarness(t) legacyCpuResources, processorResources := cpuResources(1000) @@ -6609,7 +6583,7 @@ func TestServiceSched_Preemption(t *testing.T) { ReservedHostPorts: "22", }, } - require.NoError(h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a couple of jobs and schedule them job1 := mock.Job() @@ -6619,7 +6593,7 @@ func TestServiceSched_Preemption(t *testing.T) { r1 := job1.TaskGroups[0].Tasks[0].Resources r1.CPU = 500 r1.MemoryMB = 1024 - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1)) job2 := mock.Job() job2.TaskGroups[0].Count = 1 @@ -6628,7 +6602,7 @@ func TestServiceSched_Preemption(t *testing.T) { r2 := job2.TaskGroups[0].Tasks[0].Resources r2.CPU = 350 r2.MemoryMB = 512 - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to register the jobs eval1 := &structs.Evaluation{ @@ -6648,29 +6622,29 @@ func TestServiceSched_Preemption(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval1, eval2})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval1, eval2})) expectedPreemptedAllocs := make(map[string]struct{}) // Process the two evals for job1 and job2 and make sure they allocated for index, eval := range []*structs.Evaluation{eval1, eval2} { // Process the evaluation err := h.Process(NewServiceScheduler, eval) - 
require.Nil(err) + must.NoError(t, err) plan := h.Plans[index] // Ensure the plan doesn't have annotations. - require.Nil(plan.Annotations) + must.Nil(t, plan.Annotations) // Ensure the eval has no spawned blocked eval - require.Equal(0, len(h.CreateEvals)) + must.Eq(t, 0, len(h.CreateEvals)) // Ensure the plan allocated var planned []*structs.Allocation for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) } - require.Equal(1, len(planned)) + must.Eq(t, 1, len(planned)) expectedPreemptedAllocs[planned[0].ID] = struct{}{} } @@ -6682,7 +6656,7 @@ func TestServiceSched_Preemption(t *testing.T) { r3 := job3.TaskGroups[0].Tasks[0].Resources r3.CPU = 900 r3.MemoryMB = 1700 - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job3)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job3)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -6694,37 +6668,37 @@ func TestServiceSched_Preemption(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) - require.Nil(err) + must.NoError(t, err) // New plan should be the third one in the harness plan := h.Plans[2] // Ensure the eval has no spawned blocked eval - require.Equal(0, len(h.CreateEvals)) + must.Eq(t, 0, len(h.CreateEvals)) // Ensure the plan allocated var planned []*structs.Allocation for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) } - require.Equal(1, len(planned)) + must.Eq(t, 1, len(planned)) // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job3.Namespace, job3.ID, false) - require.NoError(err) + must.NoError(t, err) // Ensure all allocations placed - require.Equal(1, len(out)) + must.Eq(t, 1, len(out)) actualPreemptedAllocs := make(map[string]struct{}) for _, id := range out[0].PreemptedAllocations { actualPreemptedAllocs[id] = struct{}{} } - require.Equal(expectedPreemptedAllocs, actualPreemptedAllocs) + must.Eq(t, expectedPreemptedAllocs, actualPreemptedAllocs) } // TestServiceSched_Migrate_NonCanary asserts that when rescheduling @@ -6735,7 +6709,7 @@ func TestServiceSched_Migrate_NonCanary(t *testing.T) { h := NewHarness(t) node1 := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node1)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node1)) job := mock.Job() job.Stable = true @@ -6744,7 +6718,7 @@ func TestServiceSched_Migrate_NonCanary(t *testing.T) { MaxParallel: 1, Canary: 1, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) deployment := &structs.Deployment{ ID: uuid.Generate(), @@ -6759,7 +6733,7 @@ func TestServiceSched_Migrate_NonCanary(t *testing.T) { Status: structs.DeploymentStatusSuccessful, StatusDescription: structs.DeploymentStatusDescriptionSuccessful, } - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment)) + must.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment)) alloc := mock.Alloc() alloc.Job = job @@ -6770,7 +6744,7 @@ func TestServiceSched_Migrate_NonCanary(t *testing.T) { 
alloc.DesiredStatus = structs.AllocDesiredStatusRun alloc.ClientStatus = structs.AllocClientStatusRunning alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation eval := &structs.Evaluation{ @@ -6781,19 +6755,19 @@ func TestServiceSched_Migrate_NonCanary(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] - require.Contains(t, plan.NodeAllocation, node1.ID) + must.MapContainsKey(t, plan.NodeAllocation, node1.ID) allocs := plan.NodeAllocation[node1.ID] - require.Len(t, allocs, 1) + must.Len(t, 1, allocs) } @@ -6807,7 +6781,7 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { h := NewHarness(t) node1 := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node1)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node1)) totalCount := 3 desiredCanaries := 1 @@ -6819,7 +6793,7 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { MaxParallel: 1, Canary: desiredCanaries, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) deployment := &structs.Deployment{ ID: uuid.Generate(), @@ -6834,7 +6808,7 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { Status: structs.DeploymentStatusSuccessful, StatusDescription: structs.DeploymentStatusDescriptionSuccessful, } - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment)) + must.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment)) var allocs []*structs.Allocation for i := 0; i < 3; i++ { @@ -6845,13 +6819,13 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // new update with new task group job2 := job.Copy() job2.Stable = false job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -6862,50 +6836,50 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + 
must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure a deployment was created - require.NotNil(t, plan.Deployment) + must.NotNil(t, plan.Deployment) updateDeployment := plan.Deployment.ID // Check status first - should be 4 allocs, only one is canary { ws := memdb.NewWatchSet() allocs, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, true) - require.NoError(t, err) - require.Len(t, allocs, 4) + must.NoError(t, err) + must.Len(t, 4, allocs) sort.Slice(allocs, func(i, j int) bool { return allocs[i].CreateIndex < allocs[j].CreateIndex }) for _, a := range allocs[:3] { - require.Equal(t, structs.AllocDesiredStatusRun, a.DesiredStatus) - require.Equal(t, uint64(0), a.Job.Version) - require.False(t, a.DeploymentStatus.IsCanary()) - require.Equal(t, node1.ID, a.NodeID) - require.Equal(t, deployment.ID, a.DeploymentID) + must.Eq(t, structs.AllocDesiredStatusRun, a.DesiredStatus) + must.Eq(t, uint64(0), a.Job.Version) + must.False(t, a.DeploymentStatus.IsCanary()) + must.Eq(t, node1.ID, a.NodeID) + must.Eq(t, deployment.ID, a.DeploymentID) } - require.Equal(t, structs.AllocDesiredStatusRun, allocs[3].DesiredStatus) - require.Equal(t, uint64(1), allocs[3].Job.Version) - require.True(t, allocs[3].DeploymentStatus.Canary) - require.Equal(t, node1.ID, allocs[3].NodeID) - require.Equal(t, updateDeployment, allocs[3].DeploymentID) + must.Eq(t, structs.AllocDesiredStatusRun, allocs[3].DesiredStatus) + must.Eq(t, uint64(1), allocs[3].Job.Version) + must.True(t, allocs[3].DeploymentStatus.Canary) + must.Eq(t, node1.ID, allocs[3].NodeID) + must.Eq(t, updateDeployment, allocs[3].DeploymentID) } // now, drain node1 and ensure all are migrated to node2 node1 = node1.Copy() node1.Status = structs.NodeStatusDown - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node1)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node1)) node2 := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) neval := &structs.Evaluation{ Namespace: structs.DefaultNamespace, @@ -6916,11 +6890,11 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{neval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{neval})) // Process the evaluation err = h.Process(NewServiceScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Now test that all node1 allocs are migrated while preserving Version and Canary info { @@ -6934,35 +6908,35 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { ws := memdb.NewWatchSet() allocs, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, true) - require.NoError(t, err) - require.Len(t, allocs, 4+expectedMigrations) + must.NoError(t, err) + must.Len(t, 4+expectedMigrations, allocs) nodeAllocs := map[string][]*structs.Allocation{} for _, a := range allocs { nodeAllocs[a.NodeID] = append(nodeAllocs[a.NodeID], a) } - require.Len(t, nodeAllocs[node1.ID], 4) + must.Len(t, 4, nodeAllocs[node1.ID]) for _, a := range nodeAllocs[node1.ID] { - require.Equal(t, structs.AllocDesiredStatusStop, a.DesiredStatus) - require.Equal(t, node1.ID, a.NodeID) + must.Eq(t, structs.AllocDesiredStatusStop, a.DesiredStatus) + must.Eq(t, node1.ID, a.NodeID) } node2Allocs := nodeAllocs[node2.ID] - 
require.Len(t, node2Allocs, expectedMigrations) + must.Len(t, expectedMigrations, node2Allocs) sort.Slice(node2Allocs, func(i, j int) bool { return node2Allocs[i].Job.Version < node2Allocs[j].Job.Version }) for _, a := range node2Allocs[:3] { - require.Equal(t, structs.AllocDesiredStatusRun, a.DesiredStatus) - require.Equal(t, uint64(0), a.Job.Version) - require.Equal(t, node2.ID, a.NodeID) - require.Equal(t, deployment.ID, a.DeploymentID) + must.Eq(t, structs.AllocDesiredStatusRun, a.DesiredStatus) + must.Eq(t, uint64(0), a.Job.Version) + must.Eq(t, node2.ID, a.NodeID) + must.Eq(t, deployment.ID, a.DeploymentID) } if rescheduleCanary { - require.Equal(t, structs.AllocDesiredStatusRun, node2Allocs[3].DesiredStatus) - require.Equal(t, uint64(1), node2Allocs[3].Job.Version) - require.Equal(t, node2.ID, node2Allocs[3].NodeID) - require.Equal(t, updateDeployment, node2Allocs[3].DeploymentID) + must.Eq(t, structs.AllocDesiredStatusRun, node2Allocs[3].DesiredStatus) + must.Eq(t, uint64(1), node2Allocs[3].Job.Version) + must.Eq(t, node2.ID, node2Allocs[3].NodeID) + must.Eq(t, updateDeployment, node2Allocs[3].DeploymentID) } } } @@ -6987,9 +6961,9 @@ func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { // update is a "destructive" update and has been updated manually promoted bool - // requireCanaries indicate whether the job update requires placing canaries due to + // mustCanaries indicate whether the job update requires placing canaries due to // it being a destructive update compared to the latest promoted deployment. - requireCanaries bool + mustCanaries bool // the expected version for migrating a stable non-canary alloc after applying this update expectedVersion uint64 @@ -7014,7 +6988,7 @@ func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { job := mock.Job() job.Version = 0 job.Stable = true - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) initDeployment := &structs.Deployment{ ID: uuid.Generate(), @@ -7032,7 +7006,7 @@ func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { Status: structs.DeploymentStatusSuccessful, StatusDescription: structs.DeploymentStatusDescriptionSuccessful, } - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), initDeployment)) + must.NoError(t, h.State.UpsertDeployment(h.NextIndex(), initDeployment)) deploymentIDs := []string{initDeployment.ID} @@ -7043,10 +7017,10 @@ func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { nj.Version = u.version nj.TaskGroups[0].Tasks[0].Env["version"] = fmt.Sprintf("%v", u.version) nj.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, nj)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, nj)) desiredCanaries := 1 - if !u.requireCanaries { + if !u.mustCanaries { desiredCanaries = 0 } deployment := &structs.Deployment{ @@ -7066,7 +7040,7 @@ func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { Status: structs.DeploymentStatusSuccessful, StatusDescription: structs.DeploymentStatusDescriptionSuccessful, } - require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment)) + must.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment)) deploymentIDs = append(deploymentIDs, deployment.ID) @@ -7080,9 +7054,9 @@ func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { // Here, assert the downgraded job version foundDeploymentID, 
foundJob, err := sched.downgradedJobForPlacement(placement) - require.NoError(t, err) - require.Equal(t, u.expectedVersion, foundJob.Version) - require.Equal(t, deploymentIDs[u.expectedVersion], foundDeploymentID) + must.NoError(t, err) + must.Eq(t, u.expectedVersion, foundJob.Version) + must.Eq(t, deploymentIDs[u.expectedVersion], foundDeploymentID) }) } } @@ -7095,7 +7069,7 @@ func TestServiceSched_RunningWithNextAllocation(t *testing.T) { h := NewHarness(t) node1 := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node1)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node1)) totalCount := 2 job := mock.Job() @@ -7103,7 +7077,7 @@ func TestServiceSched_RunningWithNextAllocation(t *testing.T) { job.Stable = true job.TaskGroups[0].Count = totalCount job.TaskGroups[0].Update = nil - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < totalCount+1; i++ { @@ -7118,13 +7092,13 @@ func TestServiceSched_RunningWithNextAllocation(t *testing.T) { // simulate a case where .NextAllocation is set but alloc is still running allocs[2].PreviousAllocation = allocs[0].ID allocs[0].NextAllocation = allocs[2].ID - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // new update with new task group job2 := job.Copy() job2.Version = 1 job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -7135,39 +7109,38 @@ func TestServiceSched_RunningWithNextAllocation(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // assert that all original allocations have been stopped for _, alloc := range allocs { updated, err := h.State.AllocByID(nil, alloc.ID) - require.NoError(t, err) - require.Equalf(t, structs.AllocDesiredStatusStop, updated.DesiredStatus, "alloc %v", alloc.ID) + must.NoError(t, err) + must.Eq(t, structs.AllocDesiredStatusStop, updated.DesiredStatus, must.Sprintf("alloc %v", alloc.ID)) } // assert that the new job has proper allocations jobAllocs, err := h.State.AllocsByJob(nil, job.Namespace, job.ID, true) - require.NoError(t, err) + must.NoError(t, err) - require.Len(t, jobAllocs, 5) + must.Len(t, 5, jobAllocs) allocsByVersion := map[uint64][]string{} for _, alloc := range jobAllocs { allocsByVersion[alloc.Job.Version] = append(allocsByVersion[alloc.Job.Version], alloc.ID) } - require.Len(t, allocsByVersion[1], 2) - require.Len(t, allocsByVersion[0], 3) + must.Len(t, 2, allocsByVersion[1]) + must.Len(t, 3, allocsByVersion[0]) } func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { ci.Parallel(t) h := NewHarness(t) - require := require.New(t) // Create some nodes, each running the CSI plugin for i := 0; i < 5; i++ { @@ -7179,7 +7152,7 @@ 
func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { NodeInfo: &structs.CSINodeInfo{MaxVolumes: 2}, }, } - require.NoError(h.State.UpsertNode( + must.NoError(t, h.State.UpsertNode( structs.MsgTypeTestSetup, h.NextIndex(), node)) } @@ -7203,7 +7176,7 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { // once its been fixed shared.AccessMode = structs.CSIVolumeAccessModeMultiNodeReader - require.NoError(h.State.UpsertCSIVolume( + must.NoError(t, h.State.UpsertCSIVolume( h.NextIndex(), []*structs.CSIVolume{shared, vol0, vol1, vol2})) // Create a job that uses both @@ -7224,7 +7197,7 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { }, } - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -7236,30 +7209,30 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation and expect a single plan without annotations err := h.Process(NewServiceScheduler, eval) - require.NoError(err) - require.Len(h.Plans, 1, "expected one plan") - require.Nil(h.Plans[0].Annotations, "expected no annotations") + must.NoError(t, err) + must.Len(t, 1, h.Plans, must.Sprint("expected one plan")) + must.Nil(t, h.Plans[0].Annotations, must.Sprint("expected no annotations")) // Expect the eval has not spawned a blocked eval - require.Equal(len(h.CreateEvals), 0) - require.Equal("", h.Evals[0].BlockedEval, "did not expect a blocked eval") - require.Equal(structs.EvalStatusComplete, h.Evals[0].Status) + must.Eq(t, len(h.CreateEvals), 0) + must.Eq(t, "", h.Evals[0].BlockedEval, must.Sprint("did not expect a blocked eval")) + must.Eq(t, structs.EvalStatusComplete, h.Evals[0].Status) // Ensure the plan allocated and we got expected placements var planned []*structs.Allocation for _, allocList := range h.Plans[0].NodeAllocation { planned = append(planned, allocList...) } - require.Len(planned, 3, "expected 3 planned allocations") + must.Len(t, 3, planned, must.Sprint("expected 3 planned allocations")) out, err := h.State.AllocsByJob(nil, job.Namespace, job.ID, false) - require.NoError(err) - require.Len(out, 3, "expected 3 placed allocations") + must.NoError(t, err) + must.Len(t, 3, out, must.Sprint("expected 3 placed allocations")) // Allocations don't have references to the actual volumes assigned, but // because we set a max of 2 volumes per Node plugin, we can verify that @@ -7268,27 +7241,27 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { seen := map[string]struct{}{} for _, alloc := range out { _, ok := seen[alloc.NodeID] - require.False(ok, "allocations should be scheduled to separate nodes") + must.False(t, ok, must.Sprint("allocations should be scheduled to separate nodes")) seen[alloc.NodeID] = struct{}{} } // Update the job to 5 instances job.TaskGroups[0].Count = 5 - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a new eval and process it. It should not create a new plan. 
eval.ID = uuid.Generate() - require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) err = h.Process(NewServiceScheduler, eval) - require.NoError(err) - require.Len(h.Plans, 1, "expected one plan") + must.NoError(t, err) + must.Len(t, 1, h.Plans, must.Sprint("expected one plan")) // Expect the eval to have failed - require.NotEqual("", h.Evals[1].BlockedEval, - "expected a blocked eval to be spawned") - require.Equal(2, h.Evals[1].QueuedAllocations["web"], "expected 2 queued allocs") - require.Equal(5, h.Evals[1].FailedTGAllocs["web"]. + must.NotEq(t, "", h.Evals[1].BlockedEval, + must.Sprint("expected a blocked eval to be spawned")) + must.Eq(t, 2, h.Evals[1].QueuedAllocations["web"], must.Sprint("expected 2 queued allocs")) + must.Eq(t, 5, h.Evals[1].FailedTGAllocs["web"]. ConstraintFiltered["missing CSI Volume volume-unique[3]"]) // Upsert 2 more per-alloc volumes @@ -7296,37 +7269,37 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { vol4.ID = "volume-unique[3]" vol5 := vol0.Copy() vol5.ID = "volume-unique[4]" - require.NoError(h.State.UpsertCSIVolume( + must.NoError(t, h.State.UpsertCSIVolume( h.NextIndex(), []*structs.CSIVolume{vol4, vol5})) // Process again with failure fixed. It should create a new plan eval.ID = uuid.Generate() - require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) err = h.Process(NewServiceScheduler, eval) - require.NoError(err) - require.Len(h.Plans, 2, "expected two plans") - require.Nil(h.Plans[1].Annotations, "expected no annotations") + must.NoError(t, err) + must.Len(t, 2, h.Plans, must.Sprint("expected two plans")) + must.Nil(t, h.Plans[1].Annotations, must.Sprint("expected no annotations")) - require.Equal("", h.Evals[2].BlockedEval, "did not expect a blocked eval") - require.Len(h.Evals[2].FailedTGAllocs, 0) + must.Eq(t, "", h.Evals[2].BlockedEval, must.Sprint("did not expect a blocked eval")) + must.MapLen(t, 0, h.Evals[2].FailedTGAllocs) // Ensure the plan allocated and we got expected placements planned = []*structs.Allocation{} for _, allocList := range h.Plans[1].NodeAllocation { planned = append(planned, allocList...) 
} - require.Len(planned, 2, "expected 2 new planned allocations") + must.Len(t, 2, planned, must.Sprint("expected 2 new planned allocations")) out, err = h.State.AllocsByJob(nil, job.Namespace, job.ID, false) - require.NoError(err) - require.Len(out, 5, "expected 5 placed allocations total") + must.NoError(t, err) + must.Len(t, 5, out, must.Sprint("expected 5 placed allocations total")) // Make sure they're still all on seperate clients seen = map[string]struct{}{} for _, alloc := range out { _, ok := seen[alloc.NodeID] - require.False(ok, "allocations should be scheduled to separate nodes") + must.False(t, ok, must.Sprint("allocations should be scheduled to separate nodes")) seen[alloc.NodeID] = struct{}{} } @@ -7354,7 +7327,7 @@ func TestServiceSched_CSITopology(t *testing.T) { }, }, } - require.NoError(t, h.State.UpsertNode( + must.NoError(t, h.State.UpsertNode( structs.MsgTypeTestSetup, h.NextIndex(), node)) } @@ -7375,7 +7348,7 @@ func TestServiceSched_CSITopology(t *testing.T) { vol1.PluginID = "test-plugin-zone-1" vol1.RequestedTopologies.Required[0].Segments["zone"] = "zone-1" - require.NoError(t, h.State.UpsertCSIVolume( + must.NoError(t, h.State.UpsertCSIVolume( h.NextIndex(), []*structs.CSIVolume{vol0, vol1})) // Create a job that uses those volumes @@ -7391,7 +7364,7 @@ func TestServiceSched_CSITopology(t *testing.T) { }, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -7403,19 +7376,19 @@ func TestServiceSched_CSITopology(t *testing.T) { Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation and expect a single plan without annotations err := h.Process(NewServiceScheduler, eval) - require.NoError(t, err) - require.Len(t, h.Plans, 1, "expected one plan") - require.Nil(t, h.Plans[0].Annotations, "expected no annotations") + must.NoError(t, err) + must.Len(t, 1, h.Plans, must.Sprint("expected one plan")) + must.Nil(t, h.Plans[0].Annotations, must.Sprint("expected no annotations")) // Expect the eval has not spawned a blocked eval - require.Equal(t, len(h.CreateEvals), 0) - require.Equal(t, "", h.Evals[0].BlockedEval, "did not expect a blocked eval") - require.Equal(t, structs.EvalStatusComplete, h.Evals[0].Status) + must.Eq(t, len(h.CreateEvals), 0) + must.Eq(t, "", h.Evals[0].BlockedEval, must.Sprint("did not expect a blocked eval")) + must.Eq(t, structs.EvalStatusComplete, h.Evals[0].Status) } @@ -7493,7 +7466,7 @@ func TestServiceSched_Client_Disconnect_Creates_Updates_and_Evals(t *testing.T) // Pending update should have unknown status. for _, nodeAlloc := range h.Plans[0].NodeAllocation[disconnectedNode.ID] { - require.Equal(t, nodeAlloc.ClientStatus, structs.AllocClientStatusUnknown) + must.Eq(t, nodeAlloc.ClientStatus, structs.AllocClientStatusUnknown) } // Simulate that NodeAllocation got processed. 
@@ -7617,7 +7590,7 @@ func initNodeAndAllocs(t *testing.T, h *Harness, job *structs.Job, // Node, which is ready node := mock.Node() node.Status = nodeStatus - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) allocs := make([]*structs.Allocation, job.TaskGroups[0].Count) for i := 0; i < job.TaskGroups[0].Count; i++ { @@ -7633,7 +7606,7 @@ func initNodeAndAllocs(t *testing.T, h *Harness, job *structs.Job, allocs[i] = alloc } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) return node, job, allocs } diff --git a/scheduler/numa_ce_test.go b/scheduler/numa_ce_test.go index 1f10bab7e..3db23d4ea 100644 --- a/scheduler/numa_ce_test.go +++ b/scheduler/numa_ce_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/nomad/client/lib/numalib" "github.com/hashicorp/nomad/client/lib/numalib/hw" "github.com/hashicorp/nomad/nomad/structs" - "github.com/stretchr/testify/require" + "github.com/shoenig/test/must" ) func TestCoreSelectorSelect(t *testing.T) { @@ -46,7 +46,7 @@ func TestCoreSelectorSelect(t *testing.T) { GuessSpeed: 0, } } - require.Equal(t, coreIds, []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47}) + must.Eq(t, []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47}, coreIds) selector := &coreSelector{ topology: &numalib.Topology{ @@ -88,8 +88,8 @@ func TestCoreSelectorSelect(t *testing.T) { } { t.Run(test.name, func(t *testing.T) { ids, mhz := selector.Select(test.resources) - require.Equal(t, test.expectedIds, ids) - require.Equal(t, test.expectedMhz, mhz) + must.Eq(t, test.expectedIds, ids) + must.Eq(t, test.expectedMhz, mhz) }) } } diff --git a/scheduler/preemption_test.go b/scheduler/preemption_test.go index eb718f5dc..a83ed85ae 100644 --- a/scheduler/preemption_test.go +++ b/scheduler/preemption_test.go @@ -5,18 +5,15 @@ package scheduler import ( "fmt" - "maps" "strconv" "testing" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/client/lib/numalib" - "github.com/hashicorp/nomad/client/lib/numalib/hw" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" psstructs "github.com/hashicorp/nomad/plugins/shared/structs" - "github.com/stretchr/testify/require" + "github.com/shoenig/test/must" ) func TestResourceDistance(t *testing.T) { @@ -140,80 +137,14 @@ func TestResourceDistance(t *testing.T) { for _, tc := range testCases { t.Run("", func(t *testing.T) { - require := require.New(t) actualDistance := fmt.Sprintf("%3.3f", basicResourceDistance(resourceAsk, tc.allocResource)) - require.Equal(tc.expectedDistance, actualDistance) + must.Eq(t, tc.expectedDistance, actualDistance) }) } } -func makeNodeResources(devices []*structs.NodeDeviceResource, busAssociativity map[string]hw.NodeID) *structs.NodeResources { - makeCore := func(node hw.NodeID, id hw.CoreID) numalib.Core { - sockets := map[hw.NodeID]hw.SocketID{ - 0: 0, - 1: 0, - 2: 1, - 3: 1, - } - return numalib.Core{ - NodeID: node, - SocketID: sockets[node], - ID: id, - Grade: numalib.Performance, - BaseSpeed: 4000, - } - } - - // 2 socket, 4 numa 
node system, 2 cores per node - processors := structs.NodeProcessorResources{ - Topology: &numalib.Topology{ - Nodes: []uint8{0, 1, 2, 3}, - Distances: numalib.SLIT{ - []numalib.Cost{10, 12, 32, 32}, - []numalib.Cost{12, 10, 32, 32}, - []numalib.Cost{32, 32, 10, 12}, - []numalib.Cost{32, 32, 12, 10}, - }, - Cores: []numalib.Core{ - makeCore(0, 0), - makeCore(0, 1), - makeCore(1, 2), - makeCore(1, 3), - makeCore(2, 4), - makeCore(2, 5), - makeCore(3, 6), - makeCore(3, 7), - }, - }, - } - - defaultNodeResources := &structs.NodeResources{ - Processors: processors, - Memory: structs.NodeMemoryResources{ - MemoryMB: 8192, - }, - Disk: structs.NodeDiskResources{ - DiskMB: 100 * 1024, - }, - Networks: []*structs.NetworkResource{ - { - Device: "eth0", - CIDR: "192.168.0.100/32", - MBits: 1000, - }, - }, - Devices: devices, - } - - defaultNodeResources.Compatibility() - - defaultNodeResources.Processors.Topology.BusAssociativity = maps.Clone(busAssociativity) - - return defaultNodeResources -} - func makeDeviceInstance(instanceID, busID string) *structs.NodeDevice { return &structs.NodeDevice{ ID: instanceID, @@ -1395,10 +1326,9 @@ func TestPreemption_Normal(t *testing.T) { for _, alloc := range tc.currentAllocations { alloc.NodeID = node.ID } - require := require.New(t) err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, tc.currentAllocations) - require.Nil(err) + must.NoError(t, err) if tc.currentPreemptions != nil { ctx.plan.NodePreemptions[node.ID] = tc.currentPreemptions } @@ -1422,14 +1352,14 @@ func TestPreemption_Normal(t *testing.T) { binPackIter.SetTaskGroup(taskGroup) option := binPackIter.Next() if tc.preemptedAllocIDs == nil { - require.Nil(option) + must.Nil(t, option) } else { - require.NotNil(option) + must.NotNil(t, option) preemptedAllocs := option.PreemptedAllocs - require.Equal(len(tc.preemptedAllocIDs), len(preemptedAllocs)) + must.Eq(t, len(tc.preemptedAllocIDs), len(preemptedAllocs)) for _, alloc := range preemptedAllocs { _, ok := tc.preemptedAllocIDs[alloc.ID] - require.Truef(ok, "alloc %s was preempted unexpectedly", alloc.ID) + must.True(t, ok, must.Sprintf("alloc %s was preempted unexpectedly", alloc.ID)) } } }) @@ -1502,7 +1432,7 @@ func TestPreemptionMultiple(t *testing.T) { }, } - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // low priority job with 4 allocs using all 4 GPUs lowPrioJob := mock.Job() @@ -1515,7 +1445,7 @@ func TestPreemptionMultiple(t *testing.T) { Name: "gpu", Count: 1, }} - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, lowPrioJob)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, lowPrioJob)) allocs := []*structs.Allocation{} allocIDs := map[string]struct{}{} @@ -1531,7 +1461,7 @@ func TestPreemptionMultiple(t *testing.T) { allocs = append(allocs, alloc) allocIDs[alloc.ID] = struct{}{} } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // new high priority job with 2 allocs, each using 2 GPUs highPrioJob := mock.Job() @@ -1544,7 +1474,7 @@ func TestPreemptionMultiple(t *testing.T) { Name: "gpu", Count: 2, }} - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, highPrioJob)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, highPrioJob)) // schedule eval := 
&structs.Evaluation{ @@ -1555,18 +1485,18 @@ func TestPreemptionMultiple(t *testing.T) { JobID: highPrioJob.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation - require.NoError(t, h.Process(NewServiceScheduler, eval)) - require.Len(t, h.Plans, 1) - require.Contains(t, h.Plans[0].NodePreemptions, node.ID) + must.NoError(t, h.Process(NewServiceScheduler, eval)) + must.Len(t, 1, h.Plans) + must.MapContainsKey(t, h.Plans[0].NodePreemptions, node.ID) preempted := map[string]struct{}{} for _, alloc := range h.Plans[0].NodePreemptions[node.ID] { preempted[alloc.ID] = struct{}{} } - require.Equal(t, allocIDs, preempted) + must.Eq(t, allocIDs, preempted) } // helper method to create allocations with given jobs and resources diff --git a/scheduler/rank_test.go b/scheduler/rank_test.go index addf2336a..04146b406 100644 --- a/scheduler/rank_test.go +++ b/scheduler/rank_test.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/nomad/nomad/structs" "github.com/shoenig/test" "github.com/shoenig/test/must" - "github.com/stretchr/testify/require" ) var testSchedulerConfig = &structs.SchedulerConfiguration{ @@ -255,15 +254,15 @@ func TestBinPackIterator_NoExistingAlloc_MixedReserve(t *testing.T) { } // 3 nodes should be feasible - require.Len(t, out, 3) + must.Len(t, 3, out) // Node without reservations is the best fit - require.Equal(t, nodes[0].Node.Name, out[0].Node.Name) + must.Eq(t, nodes[0].Node.Name, out[0].Node.Name) // Node with smallest remaining resources ("best fit") should get a // higher score than node with more remaining resources ("worse fit") - require.Equal(t, nodes[1].Node.Name, out[1].Node.Name) - require.Equal(t, nodes[2].Node.Name, out[2].Node.Name) + must.Eq(t, nodes[1].Node.Name, out[1].Node.Name) + must.Eq(t, nodes[2].Node.Name, out[2].Node.Name) } // Tests bin packing iterator with network resources at task and task group level @@ -367,27 +366,26 @@ func TestBinPackIterator_Network_Success(t *testing.T) { scoreNorm := NewScoreNormalizationIterator(ctx, binp) out := collectRanked(scoreNorm) - require := require.New(t) // We expect both nodes to be eligible to place - require.Len(out, 2) - require.Equal(out[0], nodes[0]) - require.Equal(out[1], nodes[1]) + must.Len(t, 2, out) + must.Eq(t, nodes[0], out[0]) + must.Eq(t, nodes[1], out[1]) // First node should have a perfect score - require.Equal(1.0, out[0].FinalScore) + must.Eq(t, 1.0, out[0].FinalScore) if out[1].FinalScore < 0.50 || out[1].FinalScore > 0.60 { t.Fatalf("Bad Score: %v", out[1].FinalScore) } // Verify network information at taskgroup level - require.Equal(500, out[0].AllocResources.Networks[0].MBits) - require.Equal(500, out[1].AllocResources.Networks[0].MBits) + must.Eq(t, 500, out[0].AllocResources.Networks[0].MBits) + must.Eq(t, 500, out[1].AllocResources.Networks[0].MBits) // Verify network information at task level - require.Equal(300, out[0].TaskResources["web"].Networks[0].MBits) - require.Equal(300, out[1].TaskResources["web"].Networks[0].MBits) + must.Eq(t, 300, out[0].TaskResources["web"].Networks[0].MBits) + must.Eq(t, 300, out[1].TaskResources["web"].Networks[0].MBits) } // Tests that bin packing iterator fails due to overprovisioning of network @@ -499,12 +497,11 @@ func TestBinPackIterator_Network_Failure(t *testing.T) { scoreNorm := NewScoreNormalizationIterator(ctx, 
binp) out := collectRanked(scoreNorm) - require := require.New(t) // We expect a placement failure because we need 800 mbits of network // and only 300 is free - require.Len(out, 0) - require.Equal(1, ctx.metrics.DimensionExhausted["network: bandwidth exceeded"]) + must.Len(t, 0, out) + must.Eq(t, 1, ctx.metrics.DimensionExhausted["network: bandwidth exceeded"]) } func TestBinPackIterator_Network_NoCollision_Node(t *testing.T) { @@ -595,7 +592,7 @@ func TestBinPackIterator_Network_NoCollision_Node(t *testing.T) { // Placement should succeed since reserved ports are merged instead of // treating them as a collision - require.Len(t, out, 1) + must.Len(t, 1, out) } // TestBinPackIterator_Network_NodeError asserts that NetworkIndex.SetNode can @@ -694,9 +691,9 @@ func TestBinPackIterator_Network_NodeError(t *testing.T) { // We expect a placement failure because the node has invalid reserved // ports - require.Len(t, out, 0) - require.Equal(t, 1, ctx.metrics.DimensionExhausted["network: invalid node"], - ctx.metrics.DimensionExhausted) + must.Len(t, 0, out) + must.Eq(t, 1, ctx.metrics.DimensionExhausted["network: invalid node"], + must.Sprint(ctx.metrics.DimensionExhausted)) } func TestBinPackIterator_Network_PortCollision_Alloc(t *testing.T) { @@ -786,9 +783,9 @@ func TestBinPackIterator_Network_PortCollision_Alloc(t *testing.T) { ClientStatus: structs.AllocClientStatusPending, TaskGroup: "web", } - require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) - require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + must.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) + must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) taskGroup := &structs.TaskGroup{ EphemeralDisk: &structs.EphemeralDisk{}, @@ -820,8 +817,8 @@ func TestBinPackIterator_Network_PortCollision_Alloc(t *testing.T) { out := collectRanked(scoreNorm) // We expect a placement failure due to port collision. 
- require.Len(t, out, 0) - require.Equal(t, 1, ctx.metrics.DimensionExhausted["network: port collision"]) + must.Len(t, 0, out) + must.Eq(t, 1, ctx.metrics.DimensionExhausted["network: port collision"]) } // Tests bin packing iterator with host network interpolation of task group level ports configuration @@ -961,18 +958,17 @@ func TestBinPackIterator_Network_Interpolation_Success(t *testing.T) { scoreNorm := NewScoreNormalizationIterator(ctx, binp) out := collectRanked(scoreNorm) - require := require.New(t) // We expect both nodes to be eligible to place - require.Len(out, 2) - require.Equal(out[0], nodes[0]) - require.Equal(out[1], nodes[1]) + must.Len(t, 2, out) + must.Eq(t, out[0], nodes[0]) + must.Eq(t, out[1], nodes[1]) // Verify network information at taskgroup level - require.Contains([]string{"public", "private"}, out[0].AllocResources.Networks[0].DynamicPorts[0].HostNetwork) - require.Contains([]string{"public", "private"}, out[0].AllocResources.Networks[0].DynamicPorts[1].HostNetwork) - require.Contains([]string{"first", "second"}, out[1].AllocResources.Networks[0].DynamicPorts[0].HostNetwork) - require.Contains([]string{"first", "second"}, out[1].AllocResources.Networks[0].DynamicPorts[1].HostNetwork) + must.SliceContains(t, []string{"public", "private"}, out[0].AllocResources.Networks[0].DynamicPorts[0].HostNetwork) + must.SliceContains(t, []string{"public", "private"}, out[0].AllocResources.Networks[0].DynamicPorts[1].HostNetwork) + must.SliceContains(t, []string{"first", "second"}, out[1].AllocResources.Networks[0].DynamicPorts[0].HostNetwork) + must.SliceContains(t, []string{"first", "second"}, out[1].AllocResources.Networks[0].DynamicPorts[1].HostNetwork) } // Tests that bin packing iterator fails due to absence of meta value @@ -1072,8 +1068,7 @@ func TestBinPackIterator_Host_Network_Interpolation_Absent_Value(t *testing.T) { scoreNorm := NewScoreNormalizationIterator(ctx, binp) out := collectRanked(scoreNorm) - require := require.New(t) - require.Len(out, 0) + must.Len(t, 0, out) } // Tests that bin packing iterator fails due to absence of meta value @@ -1173,8 +1168,7 @@ func TestBinPackIterator_Host_Network_Interpolation_Interface_Not_Exists(t *test scoreNorm := NewScoreNormalizationIterator(ctx, binp) out := collectRanked(scoreNorm) - require := require.New(t) - require.Len(out, 0) + must.Len(t, 0, out) } func TestBinPackIterator_PlannedAlloc(t *testing.T) { @@ -1377,9 +1371,9 @@ func TestBinPackIterator_ReservedCores(t *testing.T) { ClientStatus: structs.AllocClientStatusPending, TaskGroup: "web", } - require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) - require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + must.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) + must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) taskGroup := &structs.TaskGroup{ EphemeralDisk: &structs.EphemeralDisk{}, @@ -1403,10 +1397,9 @@ func TestBinPackIterator_ReservedCores(t *testing.T) { scoreNorm := NewScoreNormalizationIterator(ctx, binp) out := collectRanked(scoreNorm) - require := require.New(t) - require.Len(out, 1) - require.Equal(nodes[1].Node.ID, out[0].Node.ID) - require.Equal([]uint16{1}, out[0].TaskResources["web"].Cpu.ReservedCores) + must.Len(t, 1, out) + 
must.Eq(t, nodes[1].Node.ID, out[0].Node.ID) + must.Eq(t, []uint16{1}, out[0].TaskResources["web"].Cpu.ReservedCores) } func TestBinPackIterator_ExistingAlloc(t *testing.T) { @@ -1489,9 +1482,9 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) { ClientStatus: structs.AllocClientStatusPending, TaskGroup: "web", } - require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) - require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + must.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) + must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) taskGroup := &structs.TaskGroup{ EphemeralDisk: &structs.EphemeralDisk{}, @@ -1603,9 +1596,9 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) { ClientStatus: structs.AllocClientStatusPending, TaskGroup: "web", } - require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) - require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + must.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) + must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) // Add a planned eviction to alloc1 plan := ctx.Plan() @@ -2319,13 +2312,12 @@ func TestNodeAntiAffinity_PenaltyNodes(t *testing.T) { out := collectRanked(scoreNorm) - require := require.New(t) - require.Equal(2, len(out)) - require.Equal(node1.ID, out[0].Node.ID) - require.Equal(-1.0, out[0].FinalScore) + must.Eq(t, 2, len(out)) + must.Eq(t, node1.ID, out[0].Node.ID) + must.Eq(t, -1.0, out[0].FinalScore) - require.Equal(node2.ID, out[1].Node.ID) - require.Equal(0.0, out[1].FinalScore) + must.Eq(t, node2.ID, out[1].Node.ID) + must.Eq(t, 0.0, out[1].FinalScore) } @@ -2383,15 +2375,14 @@ func TestScoreNormalizationIterator(t *testing.T) { scoreNorm := NewScoreNormalizationIterator(ctx, nodeReschedulePenaltyIter) out := collectRanked(scoreNorm) - require := require.New(t) - require.Equal(2, len(out)) - require.Equal(out[0], nodes[0]) + must.Eq(t, 2, len(out)) + must.Eq(t, nodes[0], out[0]) // Score should be averaged between both scorers // -0.75 from job anti affinity and -1 from node rescheduling penalty - require.Equal(-0.875, out[0].FinalScore) - require.Equal(out[1], nodes[1]) - require.Equal(out[1].FinalScore, 0.0) + must.Eq(t, -0.875, out[0].FinalScore) + must.Eq(t, nodes[1], out[1]) + must.Eq(t, 0.0, out[1].FinalScore) } func TestNodeAffinityIterator(t *testing.T) { diff --git a/scheduler/reconcile_test.go b/scheduler/reconcile_test.go index f73e2af6e..12f5937cf 100644 --- a/scheduler/reconcile_test.go +++ b/scheduler/reconcile_test.go @@ -5,7 +5,6 @@ package scheduler import ( "fmt" - "reflect" "regexp" "strconv" "testing" @@ -277,10 +276,9 @@ func assertResults(t *testing.T, r *reconcileResults, exp *resultExpectation) { } else if exp.createDeployment != nil && r.deployment != nil { // Clear the deployment ID r.deployment.ID, exp.createDeployment.ID = "", "" - if !reflect.DeepEqual(r.deployment, exp.createDeployment) { - t.Errorf("Unexpected createdDeployment; got\n 
%#v\nwant\n%#v\nDiff: %v", - r.deployment, exp.createDeployment, pretty.Diff(r.deployment, exp.createDeployment)) - } + must.Eq(t, exp.createDeployment, r.deployment, must.Sprintf( + "Unexpected createdDeployment; got\n %#v\nwant\n%#v\nDiff: %v", + r.deployment, exp.createDeployment, pretty.Diff(r.deployment, exp.createDeployment))) } test.Eq(t, exp.deploymentUpdates, r.deploymentUpdates, test.Sprint("Expected Deployment Updates")) diff --git a/scheduler/scheduler_sysbatch_test.go b/scheduler/scheduler_sysbatch_test.go index 63778e499..d1862a687 100644 --- a/scheduler/scheduler_sysbatch_test.go +++ b/scheduler/scheduler_sysbatch_test.go @@ -17,7 +17,6 @@ import ( "github.com/hashicorp/nomad/nomad/structs" "github.com/kr/pretty" "github.com/shoenig/test/must" - "github.com/stretchr/testify/require" ) func TestSysBatch_JobRegister(t *testing.T) { @@ -30,7 +29,7 @@ func TestSysBatch_JobRegister(t *testing.T) { // Create a job job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -41,33 +40,33 @@ func TestSysBatch_JobRegister(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan does not have annotations - require.Nil(t, plan.Annotations, "expected no annotations") + must.Nil(t, plan.Annotations, must.Sprint("expected no annotations")) // Ensure the plan allocated var planned []*structs.Allocation for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) 
} - require.Len(t, planned, 10) + must.Len(t, 10, planned) // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed - require.Len(t, out, 10) + must.Len(t, 10, out) // Note that all sysbatch allocations have the same name derived from Job.Name allocNames := helper.ConvertSlice(out, @@ -80,15 +79,15 @@ func TestSysBatch_JobRegister(t *testing.T) { // Check the available nodes count, ok := out[0].Metrics.NodesAvailable["dc1"] - require.True(t, ok) - require.Equal(t, 10, count, "bad metrics %#v:", out[0].Metrics) + must.True(t, ok) + must.Eq(t, 10, count, must.Sprintf("bad metrics %#v:", out[0].Metrics)) must.Eq(t, 10, out[0].Metrics.NodesInPool, must.Sprint("expected NodesInPool metric to be set")) // Ensure no allocations are queued queued := h.Evals[0].QueuedAllocations["my-sysbatch"] - require.Equal(t, 0, queued, "unexpected queued allocations") + must.Eq(t, 0, queued, must.Sprint("unexpected queued allocations")) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -103,7 +102,7 @@ func TestSysBatch_JobRegister_AddNode_Running(t *testing.T) { // Generate a fake sysbatch job with allocations job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -115,11 +114,11 @@ func TestSysBatch_JobRegister_AddNode_Running(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusRunning allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Add a new node. node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -130,14 +129,14 @@ func TestSysBatch_JobRegister_AddNode_Running(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan had no node updates @@ -145,27 +144,27 @@ func TestSysBatch_JobRegister_AddNode_Running(t *testing.T) { for _, updateList := range plan.NodeUpdate { update = append(update, updateList...) } - require.Empty(t, update) + must.SliceLen(t, 0, update) // Ensure the plan allocated on the new node var planned []*structs.Allocation for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) 
} - require.Len(t, planned, 1) + must.Len(t, 1, planned) // Ensure it allocated on the right node _, ok := plan.NodeAllocation[node.ID] - require.True(t, ok, "allocated on wrong node: %#v", plan) + must.True(t, ok, must.Sprintf("allocated on wrong node: %#v", plan)) // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed out, _ = structs.FilterTerminalAllocs(out) - require.Len(t, out, 11) + must.Len(t, 11, out) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -181,7 +180,7 @@ func TestSysBatch_JobRegister_AddNode_Dead(t *testing.T) { // Generate a dead sysbatch job with complete allocations job := mock.SystemBatchJob() job.Status = structs.JobStatusDead // job is dead but not stopped - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -193,11 +192,11 @@ func TestSysBatch_JobRegister_AddNode_Dead(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Add a new node. node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -208,14 +207,14 @@ func TestSysBatch_JobRegister_AddNode_Dead(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan has no node update @@ -223,27 +222,27 @@ func TestSysBatch_JobRegister_AddNode_Dead(t *testing.T) { for _, updateList := range plan.NodeUpdate { update = append(update, updateList...) } - require.Len(t, update, 0) + must.Len(t, 0, update) // Ensure the plan allocates on the new node var planned []*structs.Allocation for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) 
} - require.Len(t, planned, 1) + must.Len(t, 1, planned) // Ensure it allocated on the right node _, ok := plan.NodeAllocation[node.ID] - require.True(t, ok, "allocated on wrong node: %#v", plan) + must.True(t, ok, must.Sprintf("allocated on wrong node: %#v", plan)) // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure 1 non-terminal allocation live, _ := structs.FilterTerminalAllocs(out) - require.Len(t, live, 1) + must.Len(t, 1, live) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -258,7 +257,7 @@ func TestSysBatch_JobModify(t *testing.T) { // Generate a fake job with allocations job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -270,7 +269,7 @@ func TestSysBatch_JobModify(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusPending allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Add a few terminal status allocations, these should be reinstated var terminal []*structs.Allocation @@ -283,7 +282,7 @@ func TestSysBatch_JobModify(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete terminal = append(terminal, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) // Update the job job2 := mock.SystemBatchJob() @@ -291,7 +290,7 @@ func TestSysBatch_JobModify(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -302,14 +301,14 @@ func TestSysBatch_JobModify(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted all allocs @@ -317,23 +316,23 @@ func TestSysBatch_JobModify(t *testing.T) { for _, updateList := range plan.NodeUpdate { update = append(update, updateList...) } - require.Equal(t, len(allocs), len(update)) + must.Eq(t, len(allocs), len(update)) // Ensure the plan allocated var planned []*structs.Allocation for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) 
} - require.Len(t, planned, 10) + must.Len(t, 10, planned) // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed out, _ = structs.FilterTerminalAllocs(out) - require.Len(t, out, 10) + must.Len(t, 10, out) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -347,7 +346,7 @@ func TestSysBatch_JobModify_InPlace(t *testing.T) { nodes := createNodes(t, h, 10) job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -358,12 +357,12 @@ func TestSysBatch_JobModify_InPlace(t *testing.T) { alloc.Name = "my-sysbatch.pinger[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job job2 := mock.SystemBatchJob() job2.ID = job.ID - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with update eval := &structs.Evaluation{ @@ -374,14 +373,14 @@ func TestSysBatch_JobModify_InPlace(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan did not evict any allocs @@ -389,26 +388,26 @@ func TestSysBatch_JobModify_InPlace(t *testing.T) { for _, updateList := range plan.NodeUpdate { update = append(update, updateList...) } - require.Empty(t, update) + must.SliceLen(t, 0, update) // Ensure the plan updated the existing allocs var planned []*structs.Allocation for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) 
} - require.Len(t, planned, 10) + must.Len(t, 10, planned) for _, p := range planned { - require.Equal(t, job2, p.Job, "should update job") + must.Eq(t, job2, p.Job, must.Sprint("should update job")) } // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed - require.Len(t, out, 10) + must.Len(t, 10, out) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -433,9 +432,9 @@ func TestSysBatch_JobDeregister_Purged(t *testing.T) { allocs = append(allocs, alloc) } for _, alloc := range allocs { - require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSysBatchSummary(alloc.JobID))) + must.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSysBatchSummary(alloc.JobID))) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -446,29 +445,29 @@ func TestSysBatch_JobDeregister_Purged(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted the job from all nodes. for _, node := range nodes { - require.Len(t, plan.NodeUpdate[node.ID], 1) + must.Len(t, 1, plan.NodeUpdate[node.ID]) } // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure no remaining allocations out, _ = structs.FilterTerminalAllocs(out) - require.Empty(t, out) + must.SliceLen(t, 0, out) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -484,7 +483,7 @@ func TestSysBatch_JobDeregister_Stopped(t *testing.T) { // Generate a stopped sysbatch job with allocations job := mock.SystemBatchJob() job.Stop = true - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -496,9 +495,9 @@ func TestSysBatch_JobDeregister_Stopped(t *testing.T) { allocs = append(allocs, alloc) } for _, alloc := range allocs { - require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSysBatchSummary(alloc.JobID))) + must.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSysBatchSummary(alloc.JobID))) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -509,29 +508,29 @@ func TestSysBatch_JobDeregister_Stopped(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), 
[]*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted the job from all nodes. for _, node := range nodes { - require.Len(t, plan.NodeUpdate[node.ID], 1) + must.Len(t, 1, plan.NodeUpdate[node.ID]) } // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure no remaining allocations out, _ = structs.FilterTerminalAllocs(out) - require.Empty(t, out) + must.SliceLen(t, 0, out) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -544,11 +543,11 @@ func TestSysBatch_NodeDown(t *testing.T) { // Register a down node node := mock.Node() node.Status = structs.NodeStatusDown - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a sysbatch job allocated on that node job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.SysBatchAlloc() alloc.Job = job @@ -556,7 +555,7 @@ func TestSysBatch_NodeDown(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -568,29 +567,29 @@ func TestSysBatch_NodeDown(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted all allocs - require.Len(t, plan.NodeUpdate[node.ID], 1) + must.Len(t, 1, plan.NodeUpdate[node.ID]) // Ensure the plan updated the allocation. planned := make([]*structs.Allocation, 0) for _, allocList := range plan.NodeUpdate { planned = append(planned, allocList...) } - require.Len(t, planned, 1) + must.Len(t, 1, planned) // Ensure the allocations is stopped p := planned[0] - require.Equal(t, structs.AllocDesiredStatusStop, p.DesiredStatus) + must.Eq(t, structs.AllocDesiredStatusStop, p.DesiredStatus) // removed badly designed assertion on client_status = lost // the actual client_status is pending @@ -605,18 +604,18 @@ func TestSysBatch_NodeDrain_Down(t *testing.T) { // Register a draining node node := mock.DrainNode() node.Status = structs.NodeStatusDown - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a sysbatch job allocated on that node. 
job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.SysBatchAlloc() alloc.Job = job alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -628,25 +627,25 @@ func TestSysBatch_NodeDrain_Down(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted non terminal allocs - require.Len(t, plan.NodeUpdate[node.ID], 1) + must.Len(t, 1, plan.NodeUpdate[node.ID]) // Ensure that the allocation is marked as lost var lost []string for _, alloc := range plan.NodeUpdate[node.ID] { lost = append(lost, alloc.ID) } - require.Equal(t, []string{alloc.ID}, lost) + must.Eq(t, []string{alloc.ID}, lost) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -658,11 +657,11 @@ func TestSysBatch_NodeDrain(t *testing.T) { // Register a draining node node := mock.DrainNode() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a sysbatch job allocated on that node. job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.SysBatchAlloc() alloc.Job = job @@ -670,7 +669,7 @@ func TestSysBatch_NodeDrain(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -682,28 +681,28 @@ func TestSysBatch_NodeDrain(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted all allocs - require.Len(t, plan.NodeUpdate[node.ID], 1) + must.Len(t, 1, plan.NodeUpdate[node.ID]) // Ensure the plan updated the allocation. planned := make([]*structs.Allocation, 0) for _, allocList := range plan.NodeUpdate { planned = append(planned, allocList...) 
} - require.Len(t, planned, 1) + must.Len(t, 1, planned) // Ensure the allocations is stopped - require.Equal(t, structs.AllocDesiredStatusStop, planned[0].DesiredStatus) + must.Eq(t, structs.AllocDesiredStatusStop, planned[0].DesiredStatus) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -715,18 +714,18 @@ func TestSysBatch_NodeUpdate(t *testing.T) { // Register a node node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a sysbatch job allocated on that node. job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.SysBatchAlloc() alloc.Job = job alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-system.pinger[0]" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -738,16 +737,16 @@ func TestSysBatch_NodeUpdate(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure that queued allocations is zero val, ok := h.Evals[0].QueuedAllocations["pinger"] - require.True(t, ok) - require.Zero(t, val) + must.True(t, ok) + must.Zero(t, val) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -763,7 +762,7 @@ func TestSysBatch_RetryLimit(t *testing.T) { // Create a job job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register eval := &structs.Evaluation{ @@ -774,22 +773,22 @@ func TestSysBatch_RetryLimit(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure multiple plans - require.NotEmpty(t, h.Plans) + must.SliceNotEmpty(t, h.Plans) // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure no allocations placed - require.Empty(t, out) + must.SliceLen(t, 0, out) // Should hit the retry limit h.AssertEvalStatus(t, structs.EvalStatusFailed) @@ -814,7 +813,7 @@ func TestSysBatch_Queued_With_Constraints(t *testing.T) { Operand: "=", }, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deal with the node update eval := 
&structs.Evaluation{ @@ -825,23 +824,23 @@ func TestSysBatch_Queued_With_Constraints(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure that queued allocations is zero val, ok := h.Evals[0].QueuedAllocations["pinger"] - require.True(t, ok) - require.Zero(t, val) + must.True(t, ok) + must.Zero(t, val) failedTGAllocs := h.Evals[0].FailedTGAllocs pretty.Println(failedTGAllocs) - require.NotNil(t, failedTGAllocs) - require.Contains(t, failedTGAllocs, "pinger") - require.Equal(t, len(nodes), failedTGAllocs["pinger"].NodesEvaluated) - require.Equal(t, len(nodes), failedTGAllocs["pinger"].NodesFiltered) + must.NotNil(t, failedTGAllocs) + must.MapContainsKey(t, failedTGAllocs, "pinger") + must.Eq(t, len(nodes), failedTGAllocs["pinger"].NodesEvaluated) + must.Eq(t, len(nodes), failedTGAllocs["pinger"].NodesFiltered) } @@ -856,12 +855,12 @@ func TestSysBatch_Queued_With_Constraints_PartialMatch(t *testing.T) { node := mock.Node() node.Attributes["kernel.name"] = "darwin" node.ComputeClass() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Generate a sysbatch job which can't be placed on the node job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -872,11 +871,11 @@ func TestSysBatch_Queued_With_Constraints_PartialMatch(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) foundNodes := map[string]bool{} for n := range h.Plans[0].NodeAllocation { @@ -887,7 +886,7 @@ func TestSysBatch_Queued_With_Constraints_PartialMatch(t *testing.T) { expected[n.ID] = true } - require.Equal(t, expected, foundNodes) + must.Eq(t, expected, foundNodes) } // This test ensures that the scheduler correctly ignores ineligible @@ -904,14 +903,14 @@ func TestSysBatch_JobConstraint_AddNode(t *testing.T) { var node *structs.Node node = mock.Node() node.NodeClass = "Class-A" - require.NoError(t, node.ComputeClass()) - require.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, node.ComputeClass()) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) var nodeB *structs.Node nodeB = mock.Node() nodeB.NodeClass = "Class-B" - require.NoError(t, nodeB.ComputeClass()) - require.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), nodeB)) + must.NoError(t, nodeB.ComputeClass()) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), nodeB)) // Make a sysbatch job with two task groups, each constraint to a node class job := mock.SystemBatchJob() @@ 
-932,7 +931,7 @@ func TestSysBatch_JobConstraint_AddNode(t *testing.T) { // Upsert Job job.TaskGroups = []*structs.TaskGroup{tgA, tgB} - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Evaluate the job eval := &structs.Evaluation{ @@ -943,24 +942,24 @@ func TestSysBatch_JobConstraint_AddNode(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation - require.Nil(t, h.Process(NewSysBatchScheduler, eval)) - require.Equal(t, "complete", h.Evals[0].Status) + must.NoError(t, h.Process(NewSysBatchScheduler, eval)) + must.Eq(t, "complete", h.Evals[0].Status) // QueuedAllocations is drained val, ok := h.Evals[0].QueuedAllocations["groupA"] - require.True(t, ok) - require.Equal(t, 0, val) + must.True(t, ok) + must.Eq(t, 0, val) val, ok = h.Evals[0].QueuedAllocations["groupB"] - require.True(t, ok) - require.Equal(t, 0, val) + must.True(t, ok) + must.Eq(t, 0, val) // Single plan with two NodeAllocations - require.Len(t, h.Plans, 1) - require.Len(t, h.Plans[0].NodeAllocation, 2) + must.Len(t, 1, h.Plans) + must.MapLen(t, 2, h.Plans[0].NodeAllocation) // Mark the node as ineligible node.SchedulingEligibility = structs.NodeSchedulingIneligible @@ -975,27 +974,27 @@ func TestSysBatch_JobConstraint_AddNode(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval2})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval2})) // Process the 2nd evaluation - require.Nil(t, h.Process(NewSysBatchScheduler, eval2)) - require.Equal(t, "complete", h.Evals[1].Status) + must.NoError(t, h.Process(NewSysBatchScheduler, eval2)) + must.Eq(t, "complete", h.Evals[1].Status) // Ensure no new plans - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) // Ensure all NodeAllocations are from first Eval for _, allocs := range h.Plans[0].NodeAllocation { - require.Len(t, allocs, 1) - require.Equal(t, eval.ID, allocs[0].EvalID) + must.Len(t, 1, allocs) + must.Eq(t, eval.ID, allocs[0].EvalID) } // Add a new node Class-B var nodeBTwo *structs.Node nodeBTwo = mock.Node() nodeBTwo.NodeClass = "Class-B" - require.NoError(t, nodeBTwo.ComputeClass()) - require.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), nodeBTwo)) + must.NoError(t, nodeBTwo.ComputeClass()) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), nodeBTwo)) // Evaluate the new node eval3 := &structs.Evaluation{ @@ -1009,31 +1008,31 @@ func TestSysBatch_JobConstraint_AddNode(t *testing.T) { } // Ensure 3rd eval is complete - require.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval3})) - require.Nil(t, h.Process(NewSysBatchScheduler, eval3)) - require.Equal(t, "complete", h.Evals[2].Status) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval3})) + must.NoError(t, h.Process(NewSysBatchScheduler, eval3)) + must.Eq(t, "complete", h.Evals[2].Status) - require.Len(t, h.Plans, 2) - require.Len(t, h.Plans[1].NodeAllocation, 1) + must.Len(t, 2, h.Plans) + must.MapLen(t, 1, 
h.Plans[1].NodeAllocation) // Ensure all NodeAllocations are from first Eval for _, allocs := range h.Plans[1].NodeAllocation { - require.Len(t, allocs, 1) - require.Equal(t, eval3.ID, allocs[0].EvalID) + must.Len(t, 1, allocs) + must.Eq(t, eval3.ID, allocs[0].EvalID) } ws := memdb.NewWatchSet() allocsNodeOne, err := h.State.AllocsByNode(ws, node.ID) - require.NoError(t, err) - require.Len(t, allocsNodeOne, 1) + must.NoError(t, err) + must.Len(t, 1, allocsNodeOne) allocsNodeTwo, err := h.State.AllocsByNode(ws, nodeB.ID) - require.NoError(t, err) - require.Len(t, allocsNodeTwo, 1) + must.NoError(t, err) + must.Len(t, 1, allocsNodeTwo) allocsNodeThree, err := h.State.AllocsByNode(ws, nodeBTwo.ID) - require.NoError(t, err) - require.Len(t, allocsNodeThree, 1) + must.NoError(t, err) + must.Len(t, 1, allocsNodeThree) } func TestSysBatch_JobConstraint_AllFiltered(t *testing.T) { @@ -1159,13 +1158,13 @@ func TestSysBatch_ExistingAllocNoNodes(t *testing.T) { var node *structs.Node // Create a node node = mock.Node() - require.NoError(t, node.ComputeClass()) - require.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, node.ComputeClass()) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Make a sysbatch job job := mock.SystemBatchJob() job.Meta = map[string]string{"version": "1"} - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Evaluate the job eval := &structs.Evaluation{ @@ -1177,17 +1176,17 @@ func TestSysBatch_ExistingAllocNoNodes(t *testing.T) { Status: structs.EvalStatusPending, } - require.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) - require.Nil(t, h.Process(NewSysBatchScheduler, eval)) - require.Equal(t, "complete", h.Evals[0].Status) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.Process(NewSysBatchScheduler, eval)) + must.Eq(t, "complete", h.Evals[0].Status) // QueuedAllocations is drained val, ok := h.Evals[0].QueuedAllocations["pinger"] - require.True(t, ok) - require.Equal(t, 0, val) + must.True(t, ok) + must.Eq(t, 0, val) // The plan has one NodeAllocations - require.Equal(t, 1, len(h.Plans)) + must.Eq(t, 1, len(h.Plans)) // Mark the node as ineligible node.SchedulingEligibility = structs.NodeSchedulingIneligible @@ -1202,14 +1201,14 @@ func TestSysBatch_ExistingAllocNoNodes(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval2})) - require.Nil(t, h.Process(NewSysBatchScheduler, eval2)) - require.Equal(t, "complete", h.Evals[1].Status) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval2})) + must.NoError(t, h.Process(NewSysBatchScheduler, eval2)) + must.Eq(t, "complete", h.Evals[1].Status) // Create a new job version, deploy job2 := job.Copy() job2.Meta["version"] = "2" - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Run evaluation as a plan eval3 := &structs.Evaluation{ @@ -1223,13 +1222,13 @@ func TestSysBatch_ExistingAllocNoNodes(t *testing.T) { } // Ensure New eval is complete - require.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, 
h.NextIndex(), []*structs.Evaluation{eval3})) - require.Nil(t, h.Process(NewSysBatchScheduler, eval3)) - require.Equal(t, "complete", h.Evals[2].Status) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval3})) + must.NoError(t, h.Process(NewSysBatchScheduler, eval3)) + must.Eq(t, "complete", h.Evals[2].Status) // Ensure there are no FailedTGAllocs - require.Equal(t, 0, len(h.Evals[2].FailedTGAllocs)) - require.Equal(t, 0, h.Evals[2].QueuedAllocations[job2.Name]) + must.Eq(t, 0, len(h.Evals[2].FailedTGAllocs)) + must.Eq(t, 0, h.Evals[2].QueuedAllocations[job2.Name]) } func TestSysBatch_ConstraintErrors(t *testing.T) { @@ -1244,8 +1243,8 @@ func TestSysBatch_ConstraintErrors(t *testing.T) { for _, tag := range []string{"aaaaaa", "foo", "foo", "foo"} { node = mock.Node() node.Meta["tag"] = tag - require.NoError(t, node.ComputeClass()) - require.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, node.ComputeClass()) + must.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Mark the last node as ineligible @@ -1261,7 +1260,7 @@ func TestSysBatch_ConstraintErrors(t *testing.T) { Operand: "=", }) - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Evaluate the job eval := &structs.Evaluation{ @@ -1273,25 +1272,25 @@ func TestSysBatch_ConstraintErrors(t *testing.T) { Status: structs.EvalStatusPending, } - require.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) - require.Nil(t, h.Process(NewSysBatchScheduler, eval)) - require.Equal(t, "complete", h.Evals[0].Status) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.Process(NewSysBatchScheduler, eval)) + must.Eq(t, "complete", h.Evals[0].Status) // QueuedAllocations is drained val, ok := h.Evals[0].QueuedAllocations["pinger"] - require.True(t, ok) - require.Equal(t, 0, val) + must.True(t, ok) + must.Eq(t, 0, val) // The plan has two NodeAllocations - require.Equal(t, 1, len(h.Plans)) - require.Nil(t, h.Plans[0].Annotations) - require.Equal(t, 2, len(h.Plans[0].NodeAllocation)) + must.Eq(t, 1, len(h.Plans)) + must.Nil(t, h.Plans[0].Annotations) + must.Eq(t, 2, len(h.Plans[0].NodeAllocation)) // Two nodes were allocated and are pending. 
(unlike system jobs, sybatch // jobs are not auto set to running) ws := memdb.NewWatchSet() as, err := h.State.AllocsByJob(ws, structs.DefaultNamespace, job.ID, false) - require.Nil(t, err) + must.NoError(t, err) pending := 0 for _, a := range as { @@ -1300,11 +1299,11 @@ func TestSysBatch_ConstraintErrors(t *testing.T) { } } - require.Equal(t, 2, len(as)) - require.Equal(t, 2, pending) + must.Eq(t, 2, len(as)) + must.Eq(t, 2, pending) // Failed allocations is empty - require.Equal(t, 0, len(h.Evals[0].FailedTGAllocs)) + must.Eq(t, 0, len(h.Evals[0].FailedTGAllocs)) } func TestSysBatch_ChainedAlloc(t *testing.T) { @@ -1317,7 +1316,7 @@ func TestSysBatch_ChainedAlloc(t *testing.T) { // Create a sysbatch job job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1328,11 +1327,11 @@ func TestSysBatch_ChainedAlloc(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) var allocIDs []string for _, allocList := range h.Plans[0].NodeAllocation { @@ -1348,12 +1347,12 @@ func TestSysBatch_ChainedAlloc(t *testing.T) { job1.ID = job.ID job1.TaskGroups[0].Tasks[0].Env = make(map[string]string) job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar" - require.NoError(t, h1.State.UpsertJob(structs.MsgTypeTestSetup, h1.NextIndex(), nil, job1)) + must.NoError(t, h1.State.UpsertJob(structs.MsgTypeTestSetup, h1.NextIndex(), nil, job1)) // Insert two more nodes for i := 0; i < 2; i++ { node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create a mock evaluation to update the job @@ -1365,12 +1364,12 @@ func TestSysBatch_ChainedAlloc(t *testing.T) { JobID: job1.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval1})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval1})) // Process the evaluation err = h1.Process(NewSysBatchScheduler, eval1) - require.NoError(t, err) + must.NoError(t, err) - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h1.Plans[0] // Collect all the chained allocation ids and the new allocations which @@ -1390,10 +1389,10 @@ func TestSysBatch_ChainedAlloc(t *testing.T) { // Ensure that the new allocations has their corresponding original // allocation ids - require.Equal(t, allocIDs, prevAllocs) + must.Eq(t, allocIDs, prevAllocs) // Ensuring two new allocations don't have any chained allocations - require.Len(t, newAllocs, 2) + must.Len(t, 2, newAllocs) } func TestSysBatch_PlanWithDrainedNode(t *testing.T) { @@ -1404,13 +1403,13 @@ func TestSysBatch_PlanWithDrainedNode(t *testing.T) { // Register two nodes with two different classes node := mock.DrainNode() node.NodeClass = "green" - require.NoError(t, node.ComputeClass()) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + 
must.NoError(t, node.ComputeClass()) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) node2 := mock.Node() node2.NodeClass = "blue" - require.NoError(t, node2.ComputeClass()) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) + must.NoError(t, node2.ComputeClass()) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) // Create a sysbatch job with two task groups, each constrained on node class job := mock.SystemBatchJob() @@ -1426,7 +1425,7 @@ func TestSysBatch_PlanWithDrainedNode(t *testing.T) { tg2.Name = "pinger2" tg2.Constraints[0].RTarget = "blue" job.TaskGroups = append(job.TaskGroups, tg2) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create an allocation on each node alloc := mock.SysBatchAlloc() @@ -1443,7 +1442,7 @@ func TestSysBatch_PlanWithDrainedNode(t *testing.T) { alloc2.NodeID = node2.ID alloc2.Name = "my-sysbatch.pinger2[0]" alloc2.TaskGroup = "pinger2" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc, alloc2})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc, alloc2})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1455,25 +1454,25 @@ func TestSysBatch_PlanWithDrainedNode(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted the alloc on the failed node planned := plan.NodeUpdate[node.ID] - require.Len(t, plan.NodeUpdate[node.ID], 1) + must.Len(t, 1, plan.NodeUpdate[node.ID]) // Ensure the plan didn't place - require.Empty(t, plan.NodeAllocation) + must.MapEmpty(t, plan.NodeAllocation) // Ensure the allocations is stopped - require.Equal(t, structs.AllocDesiredStatusStop, planned[0].DesiredStatus) + must.Eq(t, structs.AllocDesiredStatusStop, planned[0].DesiredStatus) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -1486,13 +1485,13 @@ func TestSysBatch_QueuedAllocsMultTG(t *testing.T) { // Register two nodes with two different classes node := mock.Node() node.NodeClass = "green" - require.NoError(t, node.ComputeClass()) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, node.ComputeClass()) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) node2 := mock.Node() node2.NodeClass = "blue" - require.NoError(t, node2.ComputeClass()) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) + must.NoError(t, node2.ComputeClass()) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) // Create a sysbatch job with two task groups, each constrained on node class job := mock.SystemBatchJob() @@ -1508,7 +1507,7 @@ func TestSysBatch_QueuedAllocsMultTG(t *testing.T) { tg2.Name = "pinger2" tg2.Constraints[0].RTarget = "blue" job.TaskGroups = append(job.TaskGroups, 
tg2) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1520,18 +1519,18 @@ func TestSysBatch_QueuedAllocsMultTG(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSysBatchScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) qa := h.Evals[0].QueuedAllocations - require.Zero(t, qa["pinger"]) - require.Zero(t, qa["pinger2"]) + must.Zero(t, qa["pinger"]) + must.Zero(t, qa["pinger2"]) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -1578,7 +1577,7 @@ func TestSysBatch_Preemption(t *testing.T) { }}, }}, } - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) nodes = append(nodes, node) } @@ -1588,7 +1587,7 @@ func TestSysBatch_Preemption(t *testing.T) { SysBatchSchedulerEnabled: true, }, }) - require.NoError(t, err) + must.NoError(t, err) // Create some low priority batch jobs and allocations for them // One job uses a reserved port @@ -1627,7 +1626,7 @@ func TestSysBatch_Preemption(t *testing.T) { }, Shared: structs.AllocatedSharedResources{DiskMB: 5 * 1024}, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1)) job2 := mock.BatchJob() job2.Type = structs.JobTypeBatch @@ -1658,7 +1657,7 @@ func TestSysBatch_Preemption(t *testing.T) { }, Shared: structs.AllocatedSharedResources{DiskMB: 5 * 1024}, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) job3 := mock.Job() job3.Type = structs.JobTypeBatch @@ -1692,7 +1691,7 @@ func TestSysBatch_Preemption(t *testing.T) { }, Shared: structs.AllocatedSharedResources{DiskMB: 5 * 1024}, } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc1, alloc2, alloc3})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc1, alloc2, alloc3})) // Create a high priority job and allocs for it // These allocs should not be preempted @@ -1735,8 +1734,8 @@ func TestSysBatch_Preemption(t *testing.T) { DiskMB: 2 * 1024, }, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job4)) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc4})) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job4)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc4})) // Create a system job such that it would need to preempt both allocs to succeed job := mock.SystemBatchJob() @@ -1749,7 +1748,7 @@ func TestSysBatch_Preemption(t *testing.T) { DynamicPorts: []structs.Port{{Label: "http"}}, }}, } - require.NoError(t, 
h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1760,23 +1759,23 @@ func TestSysBatch_Preemption(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err = h.Process(NewSysBatchScheduler, eval) - require.Nil(t, err) + must.NoError(t, err) // Ensure a single plan - require.Equal(t, 1, len(h.Plans)) + must.Eq(t, 1, len(h.Plans)) plan := h.Plans[0] // Ensure the plan doesn't have annotations - require.Nil(t, plan.Annotations) + must.Nil(t, plan.Annotations) // Ensure the plan allocated on both nodes var planned []*structs.Allocation preemptingAllocId := "" - require.Equal(t, 2, len(plan.NodeAllocation)) + must.Eq(t, 2, len(plan.NodeAllocation)) // The alloc that got placed on node 1 is the preemptor for _, allocList := range plan.NodeAllocation { @@ -1791,34 +1790,34 @@ func TestSysBatch_Preemption(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed - require.Equal(t, 2, len(out)) + must.Eq(t, 2, len(out)) // Verify that one node has preempted allocs - require.NotNil(t, plan.NodePreemptions[nodes[0].ID]) + must.NotNil(t, plan.NodePreemptions[nodes[0].ID]) preemptedAllocs := plan.NodePreemptions[nodes[0].ID] // Verify that three jobs have preempted allocs - require.Equal(t, 3, len(preemptedAllocs)) + must.Eq(t, 3, len(preemptedAllocs)) expectedPreemptedJobIDs := []string{job1.ID, job2.ID, job3.ID} // We expect job1, job2 and job3 to have preempted allocations // job4 should not have any allocs preempted for _, alloc := range preemptedAllocs { - require.Contains(t, expectedPreemptedJobIDs, alloc.JobID) + must.SliceContains(t, expectedPreemptedJobIDs, alloc.JobID) } // Look up the preempted allocs by job ID ws = memdb.NewWatchSet() for _, jobId := range expectedPreemptedJobIDs { out, err = h.State.AllocsByJob(ws, structs.DefaultNamespace, jobId, false) - require.NoError(t, err) + must.NoError(t, err) for _, alloc := range out { - require.Equal(t, structs.AllocDesiredStatusEvict, alloc.DesiredStatus) - require.Equal(t, fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocId), alloc.DesiredDescription) + must.Eq(t, structs.AllocDesiredStatusEvict, alloc.DesiredStatus) + must.Eq(t, fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocId), alloc.DesiredDescription) } } @@ -1830,13 +1829,13 @@ func TestSysBatch_canHandle(t *testing.T) { s := SystemScheduler{sysbatch: true} t.Run("sysbatch register", func(t *testing.T) { - require.True(t, s.canHandle(structs.EvalTriggerJobRegister)) + must.True(t, s.canHandle(structs.EvalTriggerJobRegister)) }) t.Run("sysbatch scheduled", func(t *testing.T) { - require.False(t, s.canHandle(structs.EvalTriggerScheduled)) + must.False(t, s.canHandle(structs.EvalTriggerScheduled)) }) t.Run("sysbatch periodic", func(t *testing.T) { - require.True(t, s.canHandle(structs.EvalTriggerPeriodicJob)) + must.True(t, s.canHandle(structs.EvalTriggerPeriodicJob)) }) } func createNodes(t *testing.T, h *Harness, n int) []*structs.Node { @@ -1844,7 +1843,7 @@ func 
createNodes(t *testing.T, h *Harness, n int) []*structs.Node { for i := 0; i < n; i++ { node := mock.Node() nodes[i] = node - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } return nodes } diff --git a/scheduler/scheduler_system_test.go b/scheduler/scheduler_system_test.go index e1352d6cc..b306a534b 100644 --- a/scheduler/scheduler_system_test.go +++ b/scheduler/scheduler_system_test.go @@ -5,7 +5,6 @@ package scheduler import ( "fmt" - "reflect" "sort" "testing" "time" @@ -18,7 +17,6 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/shoenig/test/must" - "github.com/stretchr/testify/require" ) func TestSystemSched_JobRegister(t *testing.T) { @@ -31,7 +29,7 @@ func TestSystemSched_JobRegister(t *testing.T) { // Create a job job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -42,33 +40,33 @@ func TestSystemSched_JobRegister(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan does not have annotations - require.Nil(t, plan.Annotations, "expected no annotations") + must.Nil(t, plan.Annotations, must.Sprint("expected no annotations")) // Ensure the plan allocated var planned []*structs.Allocation for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) 
} - require.Len(t, planned, 10) + must.Len(t, 10, planned) // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed - require.Len(t, out, 10) + must.Len(t, 10, out) // Note that all system allocations have the same name derived from Job.Name allocNames := helper.ConvertSlice(out, @@ -81,15 +79,15 @@ func TestSystemSched_JobRegister(t *testing.T) { // Check the available nodes count, ok := out[0].Metrics.NodesAvailable["dc1"] - require.True(t, ok) - require.Equal(t, 10, count, "bad metrics %#v:", out[0].Metrics) + must.True(t, ok) + must.Eq(t, 10, count, must.Sprintf("bad metrics %#v:", out[0].Metrics)) must.Eq(t, 10, out[0].Metrics.NodesInPool, must.Sprint("expected NodesInPool metric to be set")) // Ensure no allocations are queued queued := h.Evals[0].QueuedAllocations["web"] - require.Equal(t, 0, queued, "unexpected queued allocations") + must.Eq(t, 0, queued, must.Sprint("unexpected queued allocations")) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -105,7 +103,7 @@ func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) { // Create a job job := mock.SystemJob() job.TaskGroups[0].EphemeralDisk.Sticky = true - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -116,7 +114,7 @@ func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation if err := h.Process(NewSystemScheduler, eval); err != nil { @@ -136,7 +134,7 @@ func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) { // Get an allocation and mark it as failed alloc := planned[4].Copy() alloc.ClientStatus = structs.AllocClientStatusFailed - require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to handle the update eval = &structs.Evaluation{ @@ -147,7 +145,7 @@ func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) h1 := NewHarnessWithState(t, h.State) if err := h1.Process(NewSystemScheduler, eval); err != nil { t.Fatalf("err: %v", err) @@ -176,18 +174,18 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) { // Create a node node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a job job := mock.SystemJob() job.TaskGroups[0].EphemeralDisk.SizeMB = 60 * 1024 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, 
h.NextIndex(), nil, job)) // Create another job with a lot of disk resource ask so that it doesn't fit // the node job1 := mock.SystemJob() job1.TaskGroups[0].EphemeralDisk.SizeMB = 60 * 1024 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -198,7 +196,7 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation if err := h.Process(NewSystemScheduler, eval); err != nil { @@ -208,7 +206,7 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed if len(out) != 1 { @@ -226,7 +224,7 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) { JobID: job1.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval1})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval1})) // Process the evaluation if err := h1.Process(NewSystemScheduler, eval1); err != nil { @@ -234,7 +232,7 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) { } out, err = h1.State.AllocsByJob(ws, job.Namespace, job1.ID, false) - require.NoError(t, err) + must.NoError(t, err) if len(out) != 0 { t.Fatalf("bad: %#v", out) } @@ -247,7 +245,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) { // Create a node node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Enable Preemption h.State.SchedulerSetConfig(h.NextIndex(), &structs.SchedulerConfiguration{ @@ -260,7 +258,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) { svcJob := mock.Job() svcJob.TaskGroups[0].Count = 1 svcJob.TaskGroups[0].Tasks[0].Resources.CPU = 13500 // mock.Node() has 14k - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, svcJob)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, svcJob)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -271,7 +269,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) { JobID: svcJob.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -280,7 +278,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) { // Create a system job job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to 
register the job eval1 := &structs.Evaluation{ @@ -291,27 +289,25 @@ func TestSystemSched_ExhaustResources(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval1})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval1})) // Process the evaluation if err := h.Process(NewSystemScheduler, eval1); err != nil { t.Fatalf("err: %v", err) } // System scheduler will preempt the service job and would have placed eval1 - require := require.New(t) - newPlan := h.Plans[1] - require.Len(newPlan.NodeAllocation, 1) - require.Len(newPlan.NodePreemptions, 1) + must.MapLen(t, 1, newPlan.NodeAllocation) + must.MapLen(t, 1, newPlan.NodePreemptions) for _, allocList := range newPlan.NodeAllocation { - require.Len(allocList, 1) - require.Equal(job.ID, allocList[0].JobID) + must.Len(t, 1, allocList) + must.Eq(t, job.ID, allocList[0].JobID) } for _, allocList := range newPlan.NodePreemptions { - require.Len(allocList, 1) - require.Equal(svcJob.ID, allocList[0].JobID) + must.Len(t, 1, allocList) + must.Eq(t, svcJob.ID, allocList[0].JobID) } // Ensure that we have no queued allocations on the second eval queued := h.Evals[1].QueuedAllocations["web"] @@ -334,7 +330,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) { node.NodeClass = "bar" } node.ComputeClass() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create a job constraining on node class @@ -345,7 +341,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) { Operand: "==", } job.Constraints = append(job.Constraints, fooConstraint) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -357,7 +353,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) { AnnotatePlan: true, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) @@ -383,7 +379,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed if len(out) != 9 { @@ -415,9 +411,8 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) { } expected := &structs.DesiredUpdates{Place: 9} - if !reflect.DeepEqual(desiredChanges, expected) { - t.Fatalf("Unexpected desired updates; got %#v; want %#v", desiredChanges, expected) - } + must.Eq(t, desiredChanges, expected) + } func TestSystemSched_JobRegister_AddNode(t *testing.T) { @@ -430,7 +425,7 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) { // Generate a fake job with allocations job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ 
-441,11 +436,11 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Add a new node. node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -456,7 +451,7 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) if err != nil { @@ -464,7 +459,7 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) { } // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan had no node updates @@ -472,14 +467,14 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) { for _, updateList := range plan.NodeUpdate { update = append(update, updateList...) } - require.Empty(t, update) + must.SliceEmpty(t, update) // Ensure the plan allocated on the new node var planned []*structs.Allocation for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) } - require.Len(t, planned, 1) + must.Len(t, 1, planned) // Ensure it allocated on the right node if _, ok := plan.NodeAllocation[node.ID]; !ok { @@ -489,7 +484,7 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed out, _ = structs.FilterTerminalAllocs(out) @@ -508,7 +503,7 @@ func TestSystemSched_JobRegister_AllocFail(t *testing.T) { // Create NO nodes // Create a job job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -519,7 +514,7 @@ func TestSystemSched_JobRegister_AllocFail(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) if err != nil { @@ -544,7 +539,7 @@ func TestSystemSched_JobModify(t *testing.T) { // Generate a fake job with allocations job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -555,7 +550,7 @@ func TestSystemSched_JobModify(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) 
+ must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Add a few terminal status allocations, these should be ignored var terminal []*structs.Allocation @@ -568,7 +563,7 @@ func TestSystemSched_JobModify(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop terminal = append(terminal, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal)) // Update the job job2 := mock.SystemJob() @@ -576,7 +571,7 @@ func TestSystemSched_JobModify(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -587,14 +582,14 @@ func TestSystemSched_JobModify(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted all allocs @@ -602,23 +597,23 @@ func TestSystemSched_JobModify(t *testing.T) { for _, updateList := range plan.NodeUpdate { update = append(update, updateList...) } - require.Equal(t, len(allocs), len(update)) + must.Eq(t, len(allocs), len(update)) // Ensure the plan allocated var planned []*structs.Allocation for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) 
} - require.Len(t, planned, 10) + must.Len(t, 10, planned) // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed out, _ = structs.FilterTerminalAllocs(out) - require.Len(t, out, 10) + must.Len(t, 10, out) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -633,7 +628,7 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) { // Generate a fake job with allocations job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -644,7 +639,7 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job job2 := mock.SystemJob() @@ -656,7 +651,7 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -667,7 +662,7 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) if err != nil { @@ -733,7 +728,7 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) { // Generate a fake job with allocations job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -743,12 +738,12 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job job2 := mock.SystemJob() job2.ID = job.ID - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with update eval := &structs.Evaluation{ @@ -759,14 +754,14 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - 
require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan did not evict any allocs @@ -774,33 +769,33 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) { for _, updateList := range plan.NodeUpdate { update = append(update, updateList...) } - require.Empty(t, update) + must.SliceEmpty(t, update) // Ensure the plan updated the existing allocs var planned []*structs.Allocation for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) } - require.Len(t, planned, 10) + must.Len(t, 10, planned) for _, p := range planned { - require.Equal(t, job2, p.Job, "should update job") + must.Eq(t, job2, p.Job, must.Sprint("should update job")) } // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed - require.Len(t, out, 10) + must.Len(t, 10, out) h.AssertEvalStatus(t, structs.EvalStatusComplete) // Verify the network did not change rp := structs.Port{Label: "admin", Value: 5000} for _, alloc := range out { for _, resources := range alloc.TaskResources { - require.Equal(t, rp, resources.Networks[0].ReservedPorts[0]) + must.Eq(t, rp, resources.Networks[0].ReservedPorts[0]) } } } @@ -813,18 +808,18 @@ func TestSystemSched_JobModify_RemoveDC(t *testing.T) { // Create some nodes node1 := mock.Node() node1.Datacenter = "dc1" - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node1)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node1)) node2 := mock.Node() node2.Datacenter = "dc2" - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) nodes := []*structs.Node{node1, node2} // Generate a fake job with allocations job := mock.SystemJob() job.Datacenters = []string{"dc1", "dc2"} - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -835,12 +830,12 @@ func TestSystemSched_JobModify_RemoveDC(t *testing.T) { alloc.Name = "my-job.web[0]" allocs = append(allocs, alloc) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Update the job job2 := job.Copy() job2.Datacenters = []string{"dc1"} - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with update eval := &structs.Evaluation{ @@ -851,14 +846,14 @@ func TestSystemSched_JobModify_RemoveDC(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan did not evict any allocs @@ -866,26 +861,26 @@ func TestSystemSched_JobModify_RemoveDC(t 
*testing.T) { for _, updateList := range plan.NodeUpdate { update = append(update, updateList...) } - require.Len(t, update, 1) + must.Len(t, 1, update) // Ensure the plan updated the existing allocs var planned []*structs.Allocation for _, allocList := range plan.NodeAllocation { planned = append(planned, allocList...) } - require.Len(t, planned, 1) + must.Len(t, 1, planned) for _, p := range planned { - require.Equal(t, job2, p.Job, "should update job") + must.Eq(t, job2, p.Job, must.Sprint("should update job")) } // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed - require.Len(t, out, 2) + must.Len(t, 2, out) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -911,9 +906,9 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) { allocs = append(allocs, alloc) } for _, alloc := range allocs { - require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID))) + must.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID))) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -924,29 +919,29 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted the job from all nodes. 
for _, node := range nodes { - require.Len(t, plan.NodeUpdate[node.ID], 1) + must.Len(t, 1, plan.NodeUpdate[node.ID]) } // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure no remaining allocations out, _ = structs.FilterTerminalAllocs(out) - require.Empty(t, out) + must.SliceEmpty(t, out) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -962,7 +957,7 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) { // Generate a fake job with allocations job := mock.SystemJob() job.Stop = true - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -974,9 +969,9 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) { allocs = append(allocs, alloc) } for _, alloc := range allocs { - require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID))) + must.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID))) } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -987,29 +982,29 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted the job from all nodes. for _, node := range nodes { - require.Len(t, plan.NodeUpdate[node.ID], 1) + must.Len(t, 1, plan.NodeUpdate[node.ID]) } // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure no remaining allocations out, _ = structs.FilterTerminalAllocs(out) - require.Empty(t, out) + must.SliceEmpty(t, out) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -1022,11 +1017,11 @@ func TestSystemSched_NodeDown(t *testing.T) { // Register a down node node := mock.Node() node.Status = structs.NodeStatusDown - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a fake job allocated on that node. 
job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.Alloc() alloc.Job = job @@ -1034,7 +1029,7 @@ func TestSystemSched_NodeDown(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1046,29 +1041,29 @@ func TestSystemSched_NodeDown(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted all allocs - require.Len(t, plan.NodeUpdate[node.ID], 1) + must.Len(t, 1, plan.NodeUpdate[node.ID]) // Ensure the plan updated the allocation. planned := make([]*structs.Allocation, 0) for _, allocList := range plan.NodeUpdate { planned = append(planned, allocList...) } - require.Len(t, planned, 1) + must.Len(t, 1, planned) // Ensure the allocations is stopped p := planned[0] - require.Equal(t, structs.AllocDesiredStatusStop, p.DesiredStatus) + must.Eq(t, structs.AllocDesiredStatusStop, p.DesiredStatus) // removed badly designed assertion on client_status = lost // the actual client_status is pending @@ -1083,18 +1078,18 @@ func TestSystemSched_NodeDrain_Down(t *testing.T) { // Register a draining node node := mock.DrainNode() node.Status = structs.NodeStatusDown - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a fake job allocated on that node. 
job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.Alloc() alloc.Job = job alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -1106,25 +1101,25 @@ func TestSystemSched_NodeDrain_Down(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) // todo: yikes - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted non terminal allocs - require.Len(t, plan.NodeUpdate[node.ID], 1) + must.Len(t, 1, plan.NodeUpdate[node.ID]) // Ensure that the allocation is marked as lost var lost []string for _, alloc := range plan.NodeUpdate[node.ID] { lost = append(lost, alloc.ID) } - require.Equal(t, []string{alloc.ID}, lost) + must.Eq(t, []string{alloc.ID}, lost) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -1136,11 +1131,11 @@ func TestSystemSched_NodeDrain(t *testing.T) { // Register a draining node node := mock.DrainNode() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a fake job allocated on that node. job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.Alloc() alloc.Job = job @@ -1148,7 +1143,7 @@ func TestSystemSched_NodeDrain(t *testing.T) { alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1160,28 +1155,28 @@ func TestSystemSched_NodeDrain(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted all allocs - require.Len(t, plan.NodeUpdate[node.ID], 1) + must.Len(t, 1, plan.NodeUpdate[node.ID]) // Ensure the plan updated the allocation. planned := make([]*structs.Allocation, 0) for _, allocList := range plan.NodeUpdate { planned = append(planned, allocList...) 
} - require.Len(t, planned, 1) + must.Len(t, 1, planned) // Ensure the allocations is stopped - require.Equal(t, structs.AllocDesiredStatusStop, planned[0].DesiredStatus) + must.Eq(t, structs.AllocDesiredStatusStop, planned[0].DesiredStatus) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -1193,18 +1188,18 @@ func TestSystemSched_NodeUpdate(t *testing.T) { // Register a node node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a fake job allocated on that node. job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.Alloc() alloc.Job = job alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -1216,16 +1211,16 @@ func TestSystemSched_NodeUpdate(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure that queued allocations is zero val, ok := h.Evals[0].QueuedAllocations["web"] - require.True(t, ok) - require.Zero(t, val) + must.True(t, ok) + must.Zero(t, val) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -1241,7 +1236,7 @@ func TestSystemSched_RetryLimit(t *testing.T) { // Create a job job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1252,22 +1247,22 @@ func TestSystemSched_RetryLimit(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure multiple plans - require.NotEmpty(t, h.Plans) + must.SliceNotEmpty(t, h.Plans) // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure no allocations placed - require.Empty(t, out) + must.SliceEmpty(t, out) // Should hit the retry limit h.AssertEvalStatus(t, structs.EvalStatusFailed) @@ -1284,11 +1279,11 @@ func TestSystemSched_Queued_With_Constraints(t *testing.T) { // Register a node node := mock.Node() node.Attributes["kernel.name"] = "darwin" - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a system 
job which can't be placed on the node job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -1300,16 +1295,16 @@ func TestSystemSched_Queued_With_Constraints(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure that queued allocations is zero val, ok := h.Evals[0].QueuedAllocations["web"] - require.True(t, ok) - require.Zero(t, val) + must.True(t, ok) + must.Zero(t, val) } // This test ensures that the scheduler correctly ignores ineligible @@ -1326,14 +1321,14 @@ func TestSystemSched_JobConstraint_AddNode(t *testing.T) { var node *structs.Node node = mock.Node() node.NodeClass = "Class-A" - require.NoError(t, node.ComputeClass()) - require.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, node.ComputeClass()) + must.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) var nodeB *structs.Node nodeB = mock.Node() nodeB.NodeClass = "Class-B" - require.NoError(t, nodeB.ComputeClass()) - require.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), nodeB)) + must.NoError(t, nodeB.ComputeClass()) + must.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), nodeB)) // Make a job with two task groups, each constraint to a node class job := mock.SystemJob() @@ -1358,7 +1353,7 @@ func TestSystemSched_JobConstraint_AddNode(t *testing.T) { // Upsert Job job.TaskGroups = []*structs.TaskGroup{tgA, tgB} - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Evaluate the job eval := &structs.Evaluation{ @@ -1369,23 +1364,23 @@ func TestSystemSched_JobConstraint_AddNode(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) - require.Nil(t, h.Process(NewSystemScheduler, eval)) - require.Equal(t, "complete", h.Evals[0].Status) + must.Nil(t, h.Process(NewSystemScheduler, eval)) + must.Eq(t, "complete", h.Evals[0].Status) // QueuedAllocations is drained val, ok := h.Evals[0].QueuedAllocations["groupA"] - require.True(t, ok) - require.Equal(t, 0, val) + must.True(t, ok) + must.Eq(t, 0, val) val, ok = h.Evals[0].QueuedAllocations["groupB"] - require.True(t, ok) - require.Equal(t, 0, val) + must.True(t, ok) + must.Eq(t, 0, val) // Single plan with two NodeAllocations - require.Len(t, h.Plans, 1) - require.Len(t, h.Plans[0].NodeAllocation, 2) + must.Len(t, 1, h.Plans) + must.MapLen(t, 2, h.Plans[0].NodeAllocation) // Mark the node as ineligible node.SchedulingEligibility = structs.NodeSchedulingIneligible @@ -1401,25 +1396,25 @@ func TestSystemSched_JobConstraint_AddNode(t *testing.T) { Status: structs.EvalStatusPending, } - require.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, 
h.NextIndex(), []*structs.Evaluation{eval2})) - require.Nil(t, h.Process(NewSystemScheduler, eval2)) - require.Equal(t, "complete", h.Evals[1].Status) + must.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval2})) + must.Nil(t, h.Process(NewSystemScheduler, eval2)) + must.Eq(t, "complete", h.Evals[1].Status) // Ensure no new plans - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) // Ensure all NodeAllocations are from first Eval for _, allocs := range h.Plans[0].NodeAllocation { - require.Len(t, allocs, 1) - require.Equal(t, eval.ID, allocs[0].EvalID) + must.Len(t, 1, allocs) + must.Eq(t, eval.ID, allocs[0].EvalID) } // Add a new node Class-B var nodeBTwo *structs.Node nodeBTwo = mock.Node() nodeBTwo.NodeClass = "Class-B" - require.NoError(t, nodeBTwo.ComputeClass()) - require.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), nodeBTwo)) + must.NoError(t, nodeBTwo.ComputeClass()) + must.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), nodeBTwo)) // Evaluate the new node eval3 := &structs.Evaluation{ @@ -1433,31 +1428,31 @@ func TestSystemSched_JobConstraint_AddNode(t *testing.T) { } // Ensure New eval is complete - require.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval3})) - require.Nil(t, h.Process(NewSystemScheduler, eval3)) - require.Equal(t, "complete", h.Evals[2].Status) + must.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval3})) + must.Nil(t, h.Process(NewSystemScheduler, eval3)) + must.Eq(t, "complete", h.Evals[2].Status) - require.Len(t, h.Plans, 2) - require.Len(t, h.Plans[1].NodeAllocation, 1) + must.Len(t, 2, h.Plans) + must.MapLen(t, 1, h.Plans[1].NodeAllocation) // Ensure all NodeAllocations are from first Eval for _, allocs := range h.Plans[1].NodeAllocation { - require.Len(t, allocs, 1) - require.Equal(t, eval3.ID, allocs[0].EvalID) + must.Len(t, 1, allocs) + must.Eq(t, eval3.ID, allocs[0].EvalID) } ws := memdb.NewWatchSet() allocsNodeOne, err := h.State.AllocsByNode(ws, node.ID) - require.NoError(t, err) - require.Len(t, allocsNodeOne, 1) + must.NoError(t, err) + must.Len(t, 1, allocsNodeOne) allocsNodeTwo, err := h.State.AllocsByNode(ws, nodeB.ID) - require.NoError(t, err) - require.Len(t, allocsNodeTwo, 1) + must.NoError(t, err) + must.Len(t, 1, allocsNodeTwo) allocsNodeThree, err := h.State.AllocsByNode(ws, nodeBTwo.ID) - require.NoError(t, err) - require.Len(t, allocsNodeThree, 1) + must.NoError(t, err) + must.Len(t, 1, allocsNodeThree) } func TestSystemSched_JobConstraint_AllFiltered(t *testing.T) { @@ -1670,12 +1665,12 @@ func TestSystemSched_ExistingAllocNoNodes(t *testing.T) { var node *structs.Node // Create a node node = mock.Node() - require.NoError(t, node.ComputeClass()) - require.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, node.ComputeClass()) + must.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Make a job job := mock.SystemJob() - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Evaluate the job eval := &structs.Evaluation{ @@ -1687,17 +1682,17 @@ func TestSystemSched_ExistingAllocNoNodes(t *testing.T) { Status: structs.EvalStatusPending, } - require.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) - require.Nil(t, 
h.Process(NewSystemScheduler, eval)) - require.Equal(t, "complete", h.Evals[0].Status) + must.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.Nil(t, h.Process(NewSystemScheduler, eval)) + must.Eq(t, "complete", h.Evals[0].Status) // QueuedAllocations is drained val, ok := h.Evals[0].QueuedAllocations["web"] - require.True(t, ok) - require.Equal(t, 0, val) + must.True(t, ok) + must.Eq(t, 0, val) // The plan has one NodeAllocations - require.Equal(t, 1, len(h.Plans)) + must.Eq(t, 1, len(h.Plans)) // Mark the node as ineligible node.SchedulingEligibility = structs.NodeSchedulingIneligible @@ -1712,14 +1707,14 @@ func TestSystemSched_ExistingAllocNoNodes(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval2})) - require.Nil(t, h.Process(NewSystemScheduler, eval2)) - require.Equal(t, "complete", h.Evals[1].Status) + must.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval2})) + must.Nil(t, h.Process(NewSystemScheduler, eval2)) + must.Eq(t, "complete", h.Evals[1].Status) // Create a new job version, deploy job2 := job.Copy() job2.Meta["version"] = "2" - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Run evaluation as a plan eval3 := &structs.Evaluation{ @@ -1733,13 +1728,13 @@ func TestSystemSched_ExistingAllocNoNodes(t *testing.T) { } // Ensure New eval is complete - require.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval3})) - require.Nil(t, h.Process(NewSystemScheduler, eval3)) - require.Equal(t, "complete", h.Evals[2].Status) + must.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval3})) + must.Nil(t, h.Process(NewSystemScheduler, eval3)) + must.Eq(t, "complete", h.Evals[2].Status) // Ensure there are no FailedTGAllocs - require.Equal(t, 0, len(h.Evals[2].FailedTGAllocs)) - require.Equal(t, 0, h.Evals[2].QueuedAllocations[job2.Name]) + must.Eq(t, 0, len(h.Evals[2].FailedTGAllocs)) + must.Eq(t, 0, h.Evals[2].QueuedAllocations[job2.Name]) } // No errors reported when constraints prevent placement @@ -1755,8 +1750,8 @@ func TestSystemSched_ConstraintErrors(t *testing.T) { for _, tag := range []string{"aaaaaa", "foo", "foo", "foo"} { node = mock.Node() node.Meta["tag"] = tag - require.NoError(t, node.ComputeClass()) - require.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, node.ComputeClass()) + must.Nil(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Mark the last node as ineligible @@ -1771,7 +1766,7 @@ func TestSystemSched_ConstraintErrors(t *testing.T) { Operand: "=", }) - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Evaluate the job eval := &structs.Evaluation{ @@ -1783,24 +1778,24 @@ func TestSystemSched_ConstraintErrors(t *testing.T) { Status: structs.EvalStatusPending, } - require.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) - require.Nil(t, h.Process(NewSystemScheduler, eval)) - require.Equal(t, "complete", h.Evals[0].Status) + must.Nil(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), 
[]*structs.Evaluation{eval})) + must.Nil(t, h.Process(NewSystemScheduler, eval)) + must.Eq(t, "complete", h.Evals[0].Status) // QueuedAllocations is drained val, ok := h.Evals[0].QueuedAllocations["web"] - require.True(t, ok) - require.Equal(t, 0, val) + must.True(t, ok) + must.Eq(t, 0, val) // The plan has two NodeAllocations - require.Equal(t, 1, len(h.Plans)) - require.Nil(t, h.Plans[0].Annotations) - require.Equal(t, 2, len(h.Plans[0].NodeAllocation)) + must.Eq(t, 1, len(h.Plans)) + must.Nil(t, h.Plans[0].Annotations) + must.Eq(t, 2, len(h.Plans[0].NodeAllocation)) // Two nodes were allocated and are running ws := memdb.NewWatchSet() as, err := h.State.AllocsByJob(ws, structs.DefaultNamespace, job.ID, false) - require.Nil(t, err) + must.NoError(t, err) running := 0 for _, a := range as { @@ -1809,11 +1804,11 @@ func TestSystemSched_ConstraintErrors(t *testing.T) { } } - require.Equal(t, 2, len(as)) - require.Equal(t, 2, running) + must.Eq(t, 2, len(as)) + must.Eq(t, 2, running) // Failed allocations is empty - require.Equal(t, 0, len(h.Evals[0].FailedTGAllocs)) + must.Eq(t, 0, len(h.Evals[0].FailedTGAllocs)) } func TestSystemSched_ChainedAlloc(t *testing.T) { @@ -1826,7 +1821,7 @@ func TestSystemSched_ChainedAlloc(t *testing.T) { // Create a job job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1837,11 +1832,11 @@ func TestSystemSched_ChainedAlloc(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) var allocIDs []string for _, allocList := range h.Plans[0].NodeAllocation { @@ -1857,12 +1852,12 @@ func TestSystemSched_ChainedAlloc(t *testing.T) { job1.ID = job.ID job1.TaskGroups[0].Tasks[0].Env = make(map[string]string) job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar" - require.NoError(t, h1.State.UpsertJob(structs.MsgTypeTestSetup, h1.NextIndex(), nil, job1)) + must.NoError(t, h1.State.UpsertJob(structs.MsgTypeTestSetup, h1.NextIndex(), nil, job1)) // Insert two more nodes for i := 0; i < 2; i++ { node := mock.Node() - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } // Create a mock evaluation to update the job @@ -1874,13 +1869,13 @@ func TestSystemSched_ChainedAlloc(t *testing.T) { JobID: job1.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval1})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval1})) // Process the evaluation if err := h1.Process(NewSystemScheduler, eval1); err != nil { t.Fatalf("err: %v", err) } - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h1.Plans[0] // Collect all the chained allocation ids and the new allocations which @@ -1900,10 +1895,10 @@ func TestSystemSched_ChainedAlloc(t *testing.T) { // Ensure that the new allocations has their corresponding original // allocation ids - require.Equal(t, 
allocIDs, prevAllocs) + must.Eq(t, allocIDs, prevAllocs) // Ensuring two new allocations don't have any chained allocations - require.Len(t, newAllocs, 2) + must.Len(t, 2, newAllocs) } func TestSystemSched_PlanWithDrainedNode(t *testing.T) { @@ -1914,13 +1909,13 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) { // Register two nodes with two different classes node := mock.DrainNode() node.NodeClass = "green" - require.NoError(t, node.ComputeClass()) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, node.ComputeClass()) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) node2 := mock.Node() node2.NodeClass = "blue" - require.NoError(t, node2.ComputeClass()) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) + must.NoError(t, node2.ComputeClass()) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) // Create a Job with two task groups, each constrained on node class job := mock.SystemJob() @@ -1936,7 +1931,7 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) { tg2.Name = "web2" tg2.Constraints[0].RTarget = "blue" job.TaskGroups = append(job.TaskGroups, tg2) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create an allocation on each node alloc := mock.Alloc() @@ -1953,7 +1948,7 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) { alloc2.NodeID = node2.ID alloc2.Name = "my-job.web2[0]" alloc2.TaskGroup = "web2" - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc, alloc2})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc, alloc2})) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1965,25 +1960,25 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) plan := h.Plans[0] // Ensure the plan evicted the alloc on the failed node planned := plan.NodeUpdate[node.ID] - require.Len(t, plan.NodeUpdate[node.ID], 1) + must.Len(t, 1, plan.NodeUpdate[node.ID]) // Ensure the plan didn't place - require.Empty(t, plan.NodeAllocation) + must.MapEmpty(t, plan.NodeAllocation) // Ensure the allocations is stopped - require.Equal(t, structs.AllocDesiredStatusStop, planned[0].DesiredStatus) + must.Eq(t, structs.AllocDesiredStatusStop, planned[0].DesiredStatus) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -1996,13 +1991,13 @@ func TestSystemSched_QueuedAllocsMultTG(t *testing.T) { // Register two nodes with two different classes node := mock.Node() node.NodeClass = "green" - require.NoError(t, node.ComputeClass()) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, node.ComputeClass()) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) node2 := mock.Node() node2.NodeClass = "blue" - 
require.NoError(t, node2.ComputeClass()) - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) + must.NoError(t, node2.ComputeClass()) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2)) // Create a Job with two task groups, each constrained on node class job := mock.SystemJob() @@ -2018,7 +2013,7 @@ func TestSystemSched_QueuedAllocsMultTG(t *testing.T) { tg2.Name = "web2" tg2.Constraints[0].RTarget = "blue" job.TaskGroups = append(job.TaskGroups, tg2) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -2030,18 +2025,18 @@ func TestSystemSched_QueuedAllocsMultTG(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, 1) + must.Len(t, 1, h.Plans) qa := h.Evals[0].QueuedAllocations - require.Zero(t, qa["pinger"]) - require.Zero(t, qa["pinger2"]) + must.Zero(t, qa["pinger"]) + must.Zero(t, qa["pinger2"]) h.AssertEvalStatus(t, structs.EvalStatusComplete) } @@ -2088,7 +2083,7 @@ func TestSystemSched_Preemption(t *testing.T) { }}, }}, } - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) nodes = append(nodes, node) } @@ -2098,7 +2093,7 @@ func TestSystemSched_Preemption(t *testing.T) { SystemSchedulerEnabled: true, }, }) - require.NoError(t, err) + must.NoError(t, err) // Create some low priority batch jobs and allocations for them // One job uses a reserved port @@ -2137,7 +2132,7 @@ func TestSystemSched_Preemption(t *testing.T) { }, Shared: structs.AllocatedSharedResources{DiskMB: 5 * 1024}, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1)) job2 := mock.BatchJob() job2.Type = structs.JobTypeBatch @@ -2168,7 +2163,7 @@ func TestSystemSched_Preemption(t *testing.T) { }, Shared: structs.AllocatedSharedResources{DiskMB: 5 * 1024}, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) job3 := mock.Job() job3.Type = structs.JobTypeBatch @@ -2202,7 +2197,7 @@ func TestSystemSched_Preemption(t *testing.T) { }, Shared: structs.AllocatedSharedResources{DiskMB: 5 * 1024}, } - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc1, alloc2, alloc3})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc1, alloc2, alloc3})) // Create a high priority job and allocs for it // These allocs should not be preempted @@ -2245,8 +2240,8 @@ func TestSystemSched_Preemption(t *testing.T) { DiskMB: 2 * 1024, }, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job4)) - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, 
h.NextIndex(), []*structs.Allocation{alloc4})) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job4)) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc4})) // Create a system job such that it would need to preempt both allocs to succeed job := mock.SystemJob() @@ -2258,7 +2253,7 @@ func TestSystemSched_Preemption(t *testing.T) { DynamicPorts: []structs.Port{{Label: "http"}}, }}, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -2269,23 +2264,23 @@ func TestSystemSched_Preemption(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err = h.Process(NewSystemScheduler, eval) - require.Nil(t, err) + must.NoError(t, err) // Ensure a single plan - require.Equal(t, 1, len(h.Plans)) + must.Eq(t, 1, len(h.Plans)) plan := h.Plans[0] // Ensure the plan doesn't have annotations - require.Nil(t, plan.Annotations) + must.Nil(t, plan.Annotations) // Ensure the plan allocated on both nodes var planned []*structs.Allocation preemptingAllocId := "" - require.Equal(t, 2, len(plan.NodeAllocation)) + must.Eq(t, 2, len(plan.NodeAllocation)) // The alloc that got placed on node 1 is the preemptor for _, allocList := range plan.NodeAllocation { @@ -2300,34 +2295,34 @@ func TestSystemSched_Preemption(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - require.NoError(t, err) + must.NoError(t, err) // Ensure all allocations placed - require.Equal(t, 2, len(out)) + must.Eq(t, 2, len(out)) // Verify that one node has preempted allocs - require.NotNil(t, plan.NodePreemptions[nodes[0].ID]) + must.NotNil(t, plan.NodePreemptions[nodes[0].ID]) preemptedAllocs := plan.NodePreemptions[nodes[0].ID] // Verify that three jobs have preempted allocs - require.Equal(t, 3, len(preemptedAllocs)) + must.Eq(t, 3, len(preemptedAllocs)) expectedPreemptedJobIDs := []string{job1.ID, job2.ID, job3.ID} // We expect job1, job2 and job3 to have preempted allocations // job4 should not have any allocs preempted for _, alloc := range preemptedAllocs { - require.Contains(t, expectedPreemptedJobIDs, alloc.JobID) + must.SliceContains(t, expectedPreemptedJobIDs, alloc.JobID) } // Look up the preempted allocs by job ID ws = memdb.NewWatchSet() for _, jobId := range expectedPreemptedJobIDs { out, err = h.State.AllocsByJob(ws, structs.DefaultNamespace, jobId, false) - require.NoError(t, err) + must.NoError(t, err) for _, alloc := range out { - require.Equal(t, structs.AllocDesiredStatusEvict, alloc.DesiredStatus) - require.Equal(t, fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocId), alloc.DesiredDescription) + must.Eq(t, structs.AllocDesiredStatusEvict, alloc.DesiredStatus) + must.Eq(t, fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocId), alloc.DesiredDescription) } } @@ -2339,13 +2334,13 @@ func TestSystemSched_canHandle(t *testing.T) { s := SystemScheduler{sysbatch: false} t.Run("system register", func(t *testing.T) { - require.True(t, s.canHandle(structs.EvalTriggerJobRegister)) + 
must.True(t, s.canHandle(structs.EvalTriggerJobRegister)) }) t.Run("system scheduled", func(t *testing.T) { - require.False(t, s.canHandle(structs.EvalTriggerScheduled)) + must.False(t, s.canHandle(structs.EvalTriggerScheduled)) }) t.Run("system periodic", func(t *testing.T) { - require.False(t, s.canHandle(structs.EvalTriggerPeriodicJob)) + must.False(t, s.canHandle(structs.EvalTriggerPeriodicJob)) }) } @@ -3072,7 +3067,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { node.SchedulingEligibility = structs.NodeSchedulingIneligible } - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) // Generate a fake job allocated on that node. var job *structs.Job @@ -3085,7 +3080,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { job = sysBatchJob.Copy() alloc = sysBatchAlloc.Copy() default: - require.FailNow(t, "invalid jobType") + t.Fatalf("invalid jobType") } job.TaskGroups[0].Disconnect = &structs.DisconnectStrategy{ @@ -3101,7 +3096,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { job.Datacenters = []string{"not-targeted"} } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc.Job = job.Copy() alloc.JobID = job.ID @@ -3114,7 +3109,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { alloc.TaskStates = tc.taskState if tc.exists { - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) } if tc.modifyJob { @@ -3124,7 +3119,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { if tc.jobType == structs.JobTypeSysBatch { alloc.Job.TaskGroups[0].Tasks[0].Driver = "raw_exec" } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) } if tc.previousTerminal { @@ -3135,7 +3130,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { prev.ClientStatus = structs.AllocClientStatusComplete prev.DesiredStatus = structs.AllocDesiredStatusRun - require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{prev})) + must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{prev})) } // Create a mock evaluation to deal with disconnect eval := &structs.Evaluation{ @@ -3147,14 +3142,14 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) - require.NoError(t, err) + must.NoError(t, err) // Ensure a single plan - require.Len(t, h.Plans, tc.expectedPlanCount) + must.Len(t, tc.expectedPlanCount, h.Plans) if tc.expectedPlanCount == 0 { return } @@ -3162,8 +3157,8 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { plan := h.Plans[0] // Ensure the plan creates the expected plan - require.Len(t, plan.NodeAllocation[node.ID], len(tc.expectedNodeAllocation)) - require.Len(t, plan.NodeUpdate[node.ID], 
len(tc.expectedNodeUpdate)) + must.Len(t, len(tc.expectedNodeAllocation), plan.NodeAllocation[node.ID]) + must.Len(t, len(tc.expectedNodeUpdate), plan.NodeUpdate[node.ID]) foundMatch := false @@ -3180,7 +3175,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { } if len(tc.expectedNodeAllocation) > 0 { - require.True(t, foundMatch, "NodeAllocation did not match") + must.True(t, foundMatch, must.Sprint("NodeAllocation did not match")) } foundMatch = false @@ -3197,7 +3192,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { } if len(tc.expectedNodeUpdate) > 0 { - require.True(t, foundMatch, "NodeUpdate did not match") + must.True(t, foundMatch, must.Sprint("NodeUpdate did not match")) } h.AssertEvalStatus(t, structs.EvalStatusComplete) diff --git a/scheduler/select_test.go b/scheduler/select_test.go index 8e4946dba..5c1727e2e 100644 --- a/scheduler/select_test.go +++ b/scheduler/select_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" - "github.com/stretchr/testify/require" + "github.com/shoenig/test/must" ) func TestLimitIterator(t *testing.T) { @@ -313,12 +313,11 @@ func TestLimitIterator_ScoreThreshold(t *testing.T) { limit := NewLimitIterator(ctx, static, 1, 0, 2) limit.SetLimit(2) out := collectRanked(limit) - require := require.New(t) - require.Equal(tc.expectedOut, out) + must.Eq(t, tc.expectedOut, out) limit.Reset() - require.Equal(0, limit.skippedNodeIndex) - require.Equal(0, len(limit.skippedNodes)) + must.Eq(t, 0, limit.skippedNodeIndex) + must.Eq(t, 0, len(limit.skippedNodes)) }) } diff --git a/scheduler/spread_test.go b/scheduler/spread_test.go index f23ad1851..70725d630 100644 --- a/scheduler/spread_test.go +++ b/scheduler/spread_test.go @@ -13,7 +13,6 @@ import ( "github.com/shoenig/test" "github.com/shoenig/test/must" - "github.com/stretchr/testify/require" "github.com/hashicorp/go-set/v3" "github.com/hashicorp/nomad/ci" @@ -100,7 +99,7 @@ func TestSpreadIterator_SingleAttribute(t *testing.T) { "dc2": 0.5, } for _, rn := range out { - require.Equal(t, expectedScores[rn.Node.Datacenter], rn.FinalScore) + must.Eq(t, expectedScores[rn.Node.Datacenter], rn.FinalScore) } // Update the plan to add more allocs to nodes in dc1 @@ -179,7 +178,7 @@ func TestSpreadIterator_SingleAttribute(t *testing.T) { "dc2": 0.5, } for _, rn := range out { - require.Equal(t, expectedScores[rn.Node.Datacenter], rn.FinalScore) + must.Eq(t, expectedScores[rn.Node.Datacenter], rn.FinalScore) } } @@ -281,7 +280,7 @@ func TestSpreadIterator_MultipleAttributes(t *testing.T) { nodes[3].Node.ID: 0.556, } for _, rn := range out { - require.Equal(t, fmt.Sprintf("%.3f", expectedScores[rn.Node.ID]), fmt.Sprintf("%.3f", rn.FinalScore)) + must.Eq(t, fmt.Sprintf("%.3f", expectedScores[rn.Node.ID]), fmt.Sprintf("%.3f", rn.FinalScore)) } } @@ -328,7 +327,7 @@ func TestSpreadIterator_EvenSpread(t *testing.T) { "dc2": 0, } for _, rn := range out { - require.Equal(t, fmt.Sprintf("%.3f", expectedScores[rn.Node.Datacenter]), fmt.Sprintf("%.3f", rn.FinalScore)) + must.Eq(t, fmt.Sprintf("%.3f", expectedScores[rn.Node.Datacenter]), fmt.Sprintf("%.3f", rn.FinalScore)) } @@ -374,7 +373,7 @@ func TestSpreadIterator_EvenSpread(t *testing.T) { "dc2": 1, } for _, rn := range out { - require.Equal(t, expectedScores[rn.Node.Datacenter], rn.FinalScore) + must.Eq(t, expectedScores[rn.Node.Datacenter], rn.FinalScore) } // Update the plan to add more allocs to nodes in dc2 @@ -427,7 +426,7 @@ func TestSpreadIterator_EvenSpread(t 
*testing.T) { "dc2": -0.5, } for _, rn := range out { - require.Equal(t, fmt.Sprintf("%3.3f", expectedScores[rn.Node.Datacenter]), fmt.Sprintf("%3.3f", rn.FinalScore)) + must.Eq(t, fmt.Sprintf("%3.3f", expectedScores[rn.Node.Datacenter]), fmt.Sprintf("%3.3f", rn.FinalScore)) } // Add another node in dc3 @@ -470,7 +469,7 @@ func TestSpreadIterator_EvenSpread(t *testing.T) { "dc3": 1, } for _, rn := range out { - require.Equal(t, fmt.Sprintf("%.3f", expectedScores[rn.Node.Datacenter]), fmt.Sprintf("%.3f", rn.FinalScore)) + must.Eq(t, fmt.Sprintf("%.3f", expectedScores[rn.Node.Datacenter]), fmt.Sprintf("%.3f", rn.FinalScore)) } } @@ -525,7 +524,7 @@ func TestSpreadIterator_MaxPenalty(t *testing.T) { // All nodes are in dc3 so score should be -1 for _, rn := range out { - require.Equal(t, -1.0, rn.FinalScore) + must.Eq(t, -1.0, rn.FinalScore) } // Reset scores @@ -560,7 +559,7 @@ func TestSpreadIterator_MaxPenalty(t *testing.T) { // All nodes don't have the spread attribute so score should be -1 for _, rn := range out { - require.Equal(t, -1.0, rn.FinalScore) + must.Eq(t, -1.0, rn.FinalScore) } } @@ -685,8 +684,8 @@ func Test_evenSpreadScoreBoost(t *testing.T) { Datacenter: "dc2", } boost := evenSpreadScoreBoost(pset, opt) - require.False(t, math.IsInf(boost, 1)) - require.Equal(t, 1.0, boost) + must.False(t, math.IsInf(boost, 1)) + must.Eq(t, 1.0, boost) } // TestSpreadOnLargeCluster exercises potentially quadratic @@ -759,20 +758,20 @@ func TestSpreadOnLargeCluster(t *testing.T) { ci.Parallel(t) h := NewHarness(t) err := upsertNodes(h, tc.nodeCount, tc.racks) - require.NoError(t, err) + must.NoError(t, err) job := generateJob(tc.allocs) eval, err := upsertJob(h, job) - require.NoError(t, err) + must.NoError(t, err) start := time.Now() err = h.Process(NewServiceScheduler, eval) - require.NoError(t, err) - require.LessOrEqual(t, time.Since(start), time.Duration(60*time.Second), - "time to evaluate exceeded EvalNackTimeout") + must.NoError(t, err) + must.LessEq(t, time.Duration(60*time.Second), time.Since(start), + must.Sprint("time to evaluate exceeded EvalNackTimeout")) - require.Len(t, h.Plans, 1) - require.False(t, h.Plans[0].IsNoOp()) - require.NoError(t, validateEqualSpread(h)) + must.Len(t, 1, h.Plans) + must.False(t, h.Plans[0].IsNoOp()) + must.NoError(t, validateEqualSpread(h)) }) } } @@ -940,7 +939,7 @@ func TestSpreadPanicDowngrade(t *testing.T) { nodes = append(nodes, node) err := h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node) - require.NoError(t, err) + must.NoError(t, err) } // job version 1 @@ -974,7 +973,7 @@ func TestSpreadPanicDowngrade(t *testing.T) { job1.Version = 1 job1.TaskGroups[0].Count = 5 err := h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1) - require.NoError(t, err) + must.NoError(t, err) allocs := []*structs.Allocation{} for i := 0; i < 4; i++ { @@ -997,7 +996,7 @@ func TestSpreadPanicDowngrade(t *testing.T) { allocs = append(allocs, alloc) } err = h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs) - require.NoError(t, err) + must.NoError(t, err) // job version 2 // max_parallel = 0, canary = 1, spread == nil @@ -1006,7 +1005,7 @@ func TestSpreadPanicDowngrade(t *testing.T) { job2.Version = 2 job2.Spreads = nil err = h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2) - require.NoError(t, err) + must.NoError(t, err) eval := &structs.Evaluation{ Namespace: job2.Namespace, @@ -1018,11 +1017,11 @@ func TestSpreadPanicDowngrade(t *testing.T) { } err = 
h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})
-	require.NoError(t, err)
+	must.NoError(t, err)
 
 	processErr := h.Process(NewServiceScheduler, eval)
-	require.NoError(t, processErr, "failed to process eval")
-	require.Len(t, h.Plans, 1)
+	must.NoError(t, processErr, must.Sprint("failed to process eval"))
+	must.Len(t, 1, h.Plans)
 }
 
 func TestSpread_ImplicitTargets(t *testing.T) {
diff --git a/scheduler/stack_test.go b/scheduler/stack_test.go
index ebdda77c1..36df061c9 100644
--- a/scheduler/stack_test.go
+++ b/scheduler/stack_test.go
@@ -5,7 +5,6 @@ package scheduler
 
 import (
 	"fmt"
-	"reflect"
 	"runtime"
 	"testing"
 
@@ -82,9 +81,8 @@ func TestServiceStack_SetNodes(t *testing.T) {
 	}
 
 	out := collectFeasible(stack.source)
-	if !reflect.DeepEqual(out, nodes) {
-		t.Fatalf("bad: %#v", out)
-	}
+	must.Eq(t, nodes, out)
+
 }
 
 func TestServiceStack_SetJob(t *testing.T) {
@@ -99,9 +97,7 @@ func TestServiceStack_SetJob(t *testing.T) {
 	if stack.binPack.priority != job.Priority {
 		t.Fatalf("bad")
 	}
-	if !reflect.DeepEqual(stack.jobConstraint.constraints, job.Constraints) {
-		t.Fatalf("bad")
-	}
+	must.Eq(t, stack.jobConstraint.constraints, job.Constraints)
 }
 
 func TestServiceStack_Select_Size(t *testing.T) {
diff --git a/scheduler/testing.go b/scheduler/testing.go
index 472c3d12f..2500bcf85 100644
--- a/scheduler/testing.go
+++ b/scheduler/testing.go
@@ -9,13 +9,12 @@ import (
 	"testing"
 	"time"
 
-	"github.com/stretchr/testify/require"
-
 	"github.com/hashicorp/go-memdb"
 	"github.com/hashicorp/go-version"
 	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/state"
 	"github.com/hashicorp/nomad/nomad/structs"
+	"github.com/shoenig/test/must"
 )
 
 // RejectPlan is used to always reject the entire plan and force a state refresh
@@ -306,9 +305,9 @@ func (h *Harness) Process(factory Factory, eval *structs.Evaluation) error {
 }
 
 func (h *Harness) AssertEvalStatus(t testing.TB, state string) {
-	require.Len(t, h.Evals, 1)
+	must.Len(t, 1, h.Evals)
 	update := h.Evals[0]
-	require.Equal(t, state, update.Status)
+	must.Eq(t, state, update.Status)
 }
 
 func (h *Harness) SetNoSubmit() {
diff --git a/scheduler/util_test.go b/scheduler/util_test.go
index 2d2193ce9..e7b99d240 100644
--- a/scheduler/util_test.go
+++ b/scheduler/util_test.go
@@ -4,7 +4,6 @@ package scheduler
 
 import (
-	"reflect"
 	"testing"
 	"time"
 
@@ -16,7 +15,6 @@
 	"github.com/hashicorp/nomad/nomad/state"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/shoenig/test/must"
-	"github.com/stretchr/testify/require"
 )
 
 func BenchmarkTasksUpdated(b *testing.B) {
@@ -136,8 +134,8 @@ func TestRetryMax(t *testing.T) {
 		return false, nil
 	}
 	err := retryMax(3, bad, nil)
-	require.Error(t, err)
-	require.Equal(t, 3, calls, "mis match")
+	must.Error(t, err)
+	must.Eq(t, 3, calls)
 
 	calls = 0
 	first := true
@@ -149,8 +147,8 @@ func TestRetryMax(t *testing.T) {
 		return false
 	}
 	err = retryMax(3, bad, reset)
-	require.Error(t, err)
-	require.Equal(t, 6, calls, "mis match")
+	must.Error(t, err)
+	must.Eq(t, 6, calls)
 
 	calls = 0
 	good := func() (bool, error) {
@@ -158,8 +156,8 @@ func TestRetryMax(t *testing.T) {
 		return true, nil
 	}
 	err = retryMax(3, good, nil)
-	require.NoError(t, err)
-	require.Equal(t, 1, calls, "mis match")
+	must.NoError(t, err)
+	must.Eq(t, 1, calls)
 }
 
 func TestTaintedNodes(t *testing.T) {
@@ -173,10 +171,10 @@ func TestTaintedNodes(t *testing.T) {
 	node3.Datacenter = "dc2"
 	node3.Status = structs.NodeStatusDown
 	node4 := mock.DrainNode()
-	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, node1))
-
require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1001, node2)) - require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1002, node3)) - require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1003, node4)) + must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, node1)) + must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1001, node2)) + must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1002, node3)) + must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1003, node4)) allocs := []*structs.Allocation{ {NodeID: node1.ID}, @@ -186,19 +184,19 @@ func TestTaintedNodes(t *testing.T) { {NodeID: "12345678-abcd-efab-cdef-123456789abc"}, } tainted, err := taintedNodes(state, allocs) - require.NoError(t, err) - require.Equal(t, 3, len(tainted)) - require.NotContains(t, tainted, node1.ID) - require.NotContains(t, tainted, node2.ID) + must.NoError(t, err) + must.Eq(t, 3, len(tainted)) + must.MapNotContainsKey(t, tainted, node1.ID) + must.MapNotContainsKey(t, tainted, node2.ID) - require.Contains(t, tainted, node3.ID) - require.NotNil(t, tainted[node3.ID]) + must.MapContainsKey(t, tainted, node3.ID) + must.NotNil(t, tainted[node3.ID]) - require.Contains(t, tainted, node4.ID) - require.NotNil(t, tainted[node4.ID]) + must.MapContainsKey(t, tainted, node4.ID) + must.NotNil(t, tainted[node4.ID]) - require.Contains(t, tainted, "12345678-abcd-efab-cdef-123456789abc") - require.Nil(t, tainted["12345678-abcd-efab-cdef-123456789abc"]) + must.MapContainsKey(t, tainted, "12345678-abcd-efab-cdef-123456789abc") + must.Nil(t, tainted["12345678-abcd-efab-cdef-123456789abc"]) } func TestShuffleNodes(t *testing.T) { @@ -223,13 +221,13 @@ func TestShuffleNodes(t *testing.T) { eval := mock.Eval() // will have random EvalID plan := eval.MakePlan(mock.Job()) shuffleNodes(plan, 1000, nodes) - require.False(t, reflect.DeepEqual(nodes, orig)) + must.NotEq(t, nodes, orig) nodes2 := make([]*structs.Node, len(nodes)) copy(nodes2, orig) shuffleNodes(plan, 1000, nodes2) - require.True(t, reflect.DeepEqual(nodes, nodes2)) + must.Eq(t, nodes, nodes2) } @@ -602,58 +600,58 @@ func TestSetStatus(t *testing.T) { eval := mock.Eval() status := "a" desc := "b" - require.NoError(t, setStatus(logger, h, eval, nil, nil, nil, status, desc, nil, "")) - require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals) + must.NoError(t, setStatus(logger, h, eval, nil, nil, nil, status, desc, nil, "")) + must.Eq(t, 1, len(h.Evals), must.Sprintf("setStatus() didn't update plan: %v", h.Evals)) newEval := h.Evals[0] - require.True(t, newEval.ID == eval.ID && newEval.Status == status && newEval.StatusDescription == desc, - "setStatus() submited invalid eval: %v", newEval) + must.True(t, newEval.ID == eval.ID && newEval.Status == status && newEval.StatusDescription == desc, + must.Sprintf("setStatus() submited invalid eval: %v", newEval)) // Test next evals h = NewHarness(t) next := mock.Eval() - require.NoError(t, setStatus(logger, h, eval, next, nil, nil, status, desc, nil, "")) - require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals) + must.NoError(t, setStatus(logger, h, eval, next, nil, nil, status, desc, nil, "")) + must.Eq(t, 1, len(h.Evals), must.Sprintf("setStatus() didn't update plan: %v", h.Evals)) newEval = h.Evals[0] - require.Equal(t, next.ID, newEval.NextEval, "setStatus() didn't set nextEval correctly: %v", newEval) + must.Eq(t, next.ID, newEval.NextEval, must.Sprintf("setStatus() didn't set nextEval correctly: %v", newEval)) // Test 
blocked evals h = NewHarness(t) blocked := mock.Eval() - require.NoError(t, setStatus(logger, h, eval, nil, blocked, nil, status, desc, nil, "")) - require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals) + must.NoError(t, setStatus(logger, h, eval, nil, blocked, nil, status, desc, nil, "")) + must.Eq(t, 1, len(h.Evals), must.Sprintf("setStatus() didn't update plan: %v", h.Evals)) newEval = h.Evals[0] - require.Equal(t, blocked.ID, newEval.BlockedEval, "setStatus() didn't set BlockedEval correctly: %v", newEval) + must.Eq(t, blocked.ID, newEval.BlockedEval, must.Sprintf("setStatus() didn't set BlockedEval correctly: %v", newEval)) // Test metrics h = NewHarness(t) metrics := map[string]*structs.AllocMetric{"foo": nil} - require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, nil, "")) - require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals) + must.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, nil, "")) + must.Eq(t, 1, len(h.Evals), must.Sprintf("setStatus() didn't update plan: %v", h.Evals)) newEval = h.Evals[0] - require.True(t, reflect.DeepEqual(newEval.FailedTGAllocs, metrics), - "setStatus() didn't set failed task group metrics correctly: %v", newEval) + must.Eq(t, newEval.FailedTGAllocs, metrics, + must.Sprintf("setStatus() didn't set failed task group metrics correctly: %v", newEval)) // Test queued allocations h = NewHarness(t) queuedAllocs := map[string]int{"web": 1} - require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, "")) - require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals) + must.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, "")) + must.Eq(t, 1, len(h.Evals), must.Sprintf("setStatus() didn't update plan: %v", h.Evals)) newEval = h.Evals[0] - require.True(t, reflect.DeepEqual(newEval.QueuedAllocations, queuedAllocs), "setStatus() didn't set failed task group metrics correctly: %v", newEval) + must.Eq(t, newEval.QueuedAllocations, queuedAllocs, must.Sprintf("setStatus() didn't set failed task group metrics correctly: %v", newEval)) h = NewHarness(t) dID := uuid.Generate() - require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, dID)) - require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals) + must.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, dID)) + must.Eq(t, 1, len(h.Evals), must.Sprintf("setStatus() didn't update plan: %v", h.Evals)) newEval = h.Evals[0] - require.Equal(t, dID, newEval.DeploymentID, "setStatus() didn't set deployment id correctly: %v", newEval) + must.Eq(t, dID, newEval.DeploymentID, must.Sprintf("setStatus() didn't set deployment id correctly: %v", newEval)) } func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) { @@ -664,7 +662,7 @@ func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) { job := mock.Job() node := mock.Node() - require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node)) + must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node)) // Register an alloc alloc := &structs.Allocation{ @@ -690,8 +688,8 @@ func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) { TaskGroup: "web", } alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources} - require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, 
[]*structs.Allocation{alloc})) + must.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) // Create a new task group that prevents in-place updates. tg := &structs.TaskGroup{} @@ -709,8 +707,8 @@ func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) { // Do the inplace update. unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates) - require.True(t, len(unplaced) == 1 && len(inplace) == 0, "inplaceUpdate incorrectly did an inplace update") - require.Empty(t, ctx.plan.NodeAllocation, "inplaceUpdate incorrectly did an inplace update") + must.True(t, len(unplaced) == 1 && len(inplace) == 0, must.Sprint("inplaceUpdate incorrectly did an inplace update")) + must.MapEmpty(t, ctx.plan.NodeAllocation, must.Sprint("inplaceUpdate incorrectly did an inplace update")) } func TestInplaceUpdate_AllocatedResources(t *testing.T) { @@ -721,7 +719,7 @@ func TestInplaceUpdate_AllocatedResources(t *testing.T) { job := mock.Job() node := mock.Node() - require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node)) + must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node)) // Register an alloc alloc := &structs.Allocation{ @@ -746,8 +744,8 @@ func TestInplaceUpdate_AllocatedResources(t *testing.T) { TaskGroup: "web", } alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources} - require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) // Update TG to add a new service (inplace) tg := job.TaskGroups[0] @@ -763,13 +761,13 @@ func TestInplaceUpdate_AllocatedResources(t *testing.T) { // Do the inplace update. 
unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates) - require.True(t, len(unplaced) == 0 && len(inplace) == 1, "inplaceUpdate incorrectly did not perform an inplace update") - require.NotEmpty(t, ctx.plan.NodeAllocation, "inplaceUpdate incorrectly did an inplace update") - require.NotEmpty(t, ctx.plan.NodeAllocation[node.ID][0].AllocatedResources.Shared.Ports) + must.True(t, len(unplaced) == 0 && len(inplace) == 1, must.Sprint("inplaceUpdate incorrectly did not perform an inplace update")) + must.MapNotEmpty(t, ctx.plan.NodeAllocation, must.Sprint("inplaceUpdate incorrectly did an inplace update")) + must.SliceNotEmpty(t, ctx.plan.NodeAllocation[node.ID][0].AllocatedResources.Shared.Ports) port, ok := ctx.plan.NodeAllocation[node.ID][0].AllocatedResources.Shared.Ports.Get("api-port") - require.True(t, ok) - require.Equal(t, 19910, port.Value) + must.True(t, ok) + must.Eq(t, 19910, port.Value) } func TestInplaceUpdate_NoMatch(t *testing.T) { @@ -780,7 +778,7 @@ func TestInplaceUpdate_NoMatch(t *testing.T) { job := mock.Job() node := mock.Node() - require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node)) + must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node)) // Register an alloc alloc := &structs.Allocation{ @@ -806,8 +804,8 @@ func TestInplaceUpdate_NoMatch(t *testing.T) { TaskGroup: "web", } alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources} - require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) // Create a new task group that requires too much resources. tg := &structs.TaskGroup{} @@ -821,8 +819,8 @@ func TestInplaceUpdate_NoMatch(t *testing.T) { // Do the inplace update. unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates) - require.True(t, len(unplaced) == 1 && len(inplace) == 0, "inplaceUpdate incorrectly did an inplace update") - require.Empty(t, ctx.plan.NodeAllocation, "inplaceUpdate incorrectly did an inplace update") + must.True(t, len(unplaced) == 1 && len(inplace) == 0, must.Sprint("inplaceUpdate incorrectly did an inplace update")) + must.MapEmpty(t, ctx.plan.NodeAllocation, must.Sprint("inplaceUpdate incorrectly did an inplace update")) } func TestInplaceUpdate_Success(t *testing.T) { @@ -833,7 +831,7 @@ func TestInplaceUpdate_Success(t *testing.T) { job := mock.Job() node := mock.Node() - require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node)) + must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node)) // Register an alloc alloc := &structs.Allocation{ @@ -859,8 +857,8 @@ func TestInplaceUpdate_Success(t *testing.T) { DesiredStatus: structs.AllocDesiredStatusRun, } alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources} - require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) + must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) // Create a new task group that updates the resources. 
tg := &structs.TaskGroup{} @@ -891,23 +889,23 @@ func TestInplaceUpdate_Success(t *testing.T) { // Do the inplace update. unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates) - require.True(t, len(unplaced) == 0 && len(inplace) == 1, "inplaceUpdate did not do an inplace update") - require.Equal(t, 1, len(ctx.plan.NodeAllocation), "inplaceUpdate did not do an inplace update") - require.Equal(t, alloc.ID, inplace[0].Alloc.ID, "inplaceUpdate returned the wrong, inplace updated alloc: %#v", inplace) + must.True(t, len(unplaced) == 0 && len(inplace) == 1, must.Sprint("inplaceUpdate did not do an inplace update")) + must.Eq(t, 1, len(ctx.plan.NodeAllocation), must.Sprint("inplaceUpdate did not do an inplace update")) + must.Eq(t, alloc.ID, inplace[0].Alloc.ID, must.Sprintf("inplaceUpdate returned the wrong, inplace updated alloc: %#v", inplace)) // Get the alloc we inserted. a := inplace[0].Alloc // TODO(sean@): Verify this is correct vs: ctx.plan.NodeAllocation[alloc.NodeID][0] - require.NotNil(t, a.Job) - require.Equal(t, 1, len(a.Job.TaskGroups)) - require.Equal(t, 1, len(a.Job.TaskGroups[0].Tasks)) - require.Equal(t, 3, len(a.Job.TaskGroups[0].Tasks[0].Services), - "Expected number of services: %v, Actual: %v", 3, len(a.Job.TaskGroups[0].Tasks[0].Services)) + must.NotNil(t, a.Job) + must.Eq(t, 1, len(a.Job.TaskGroups)) + must.Eq(t, 1, len(a.Job.TaskGroups[0].Tasks)) + must.Eq(t, 3, len(a.Job.TaskGroups[0].Tasks[0].Services), must.Sprintf( + "Expected number of services: %v, Actual: %v", 3, len(a.Job.TaskGroups[0].Tasks[0].Services))) serviceNames := make(map[string]struct{}, 3) for _, consulService := range a.Job.TaskGroups[0].Tasks[0].Services { serviceNames[consulService.Name] = struct{}{} } - require.Equal(t, 3, len(serviceNames)) + must.Eq(t, 3, len(serviceNames)) for _, name := range []string{"dummy-service", "dummy-service2", "web-frontend"} { if _, found := serviceNames[name]; !found { @@ -1051,23 +1049,23 @@ func TestUtil_connectSidecarServiceUpdated(t *testing.T) { ci.Parallel(t) t.Run("both nil", func(t *testing.T) { - require.False(t, connectSidecarServiceUpdated(nil, nil).modified) + must.False(t, connectSidecarServiceUpdated(nil, nil).modified) }) t.Run("one nil", func(t *testing.T) { - require.True(t, connectSidecarServiceUpdated(nil, new(structs.ConsulSidecarService)).modified) + must.True(t, connectSidecarServiceUpdated(nil, new(structs.ConsulSidecarService)).modified) }) t.Run("ports differ", func(t *testing.T) { a := &structs.ConsulSidecarService{Port: "1111"} b := &structs.ConsulSidecarService{Port: "2222"} - require.True(t, connectSidecarServiceUpdated(a, b).modified) + must.True(t, connectSidecarServiceUpdated(a, b).modified) }) t.Run("same", func(t *testing.T) { a := &structs.ConsulSidecarService{Port: "1111"} b := &structs.ConsulSidecarService{Port: "1111"} - require.False(t, connectSidecarServiceUpdated(a, b).modified) + must.False(t, connectSidecarServiceUpdated(a, b).modified) }) } @@ -1144,17 +1142,17 @@ func TestTaskGroupConstraints(t *testing.T) { expDrivers := map[string]struct{}{"exec": {}, "docker": {}} actConstrains := taskGroupConstraints(tg) - require.True(t, reflect.DeepEqual(actConstrains.constraints, expConstr), - "taskGroupConstraints(%v) returned %v; want %v", tg, actConstrains.constraints, expConstr) - require.True(t, reflect.DeepEqual(actConstrains.drivers, expDrivers), - "taskGroupConstraints(%v) returned %v; want %v", tg, actConstrains.drivers, expDrivers) + must.Eq(t, actConstrains.constraints, expConstr, must.Sprintf( + 
"taskGroupConstraints(%v) returned %v; want %v", tg, actConstrains.constraints, expConstr)) + must.Eq(t, actConstrains.drivers, expDrivers, must.Sprintf( + "taskGroupConstraints(%v) returned %v; want %v", tg, actConstrains.drivers, expDrivers)) } func TestProgressMade(t *testing.T) { ci.Parallel(t) noopPlan := &structs.PlanResult{} - require.False(t, progressMade(nil) || progressMade(noopPlan), "no progress plan marked as making progress") + must.False(t, progressMade(nil) || progressMade(noopPlan), must.Sprint("no progress plan marked as making progress")) m := map[string][]*structs.Allocation{ "foo": {mock.Alloc()}, @@ -1172,7 +1170,7 @@ func TestProgressMade(t *testing.T) { }, } - require.True(t, progressMade(both) && progressMade(update) && progressMade(alloc) && + must.True(t, progressMade(both) && progressMade(update) && progressMade(alloc) && progressMade(deployment) && progressMade(deploymentUpdates)) } @@ -1231,7 +1229,7 @@ func TestDesiredUpdates(t *testing.T) { } desired := desiredUpdates(diff, inplace, destructive) - require.True(t, reflect.DeepEqual(desired, expected), "desiredUpdates() returned %#v; want %#v", desired, expected) + must.Eq(t, desired, expected, must.Sprintf("desiredUpdates() returned %#v; want %#v", desired, expected)) } func TestUtil_AdjustQueuedAllocations(t *testing.T) { @@ -1268,7 +1266,7 @@ func TestUtil_AdjustQueuedAllocations(t *testing.T) { queuedAllocs := map[string]int{"web": 2} adjustQueuedAllocations(logger, &planResult, queuedAllocs) - require.Equal(t, 1, queuedAllocs["web"]) + must.Eq(t, 1, queuedAllocs["web"]) } func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) { @@ -1308,7 +1306,7 @@ func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) { allocsLost = append(allocsLost, alloc.ID) } expected := []string{alloc1.ID, alloc2.ID} - require.True(t, reflect.DeepEqual(allocsLost, expected), "actual: %v, expected: %v", allocsLost, expected) + must.Eq(t, allocsLost, expected, must.Sprintf("actual: %v, expected: %v", allocsLost, expected)) // Update the node status to ready and try again plan = structs.Plan{ @@ -1322,7 +1320,7 @@ func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) { allocsLost = append(allocsLost, alloc.ID) } expected = []string{} - require.True(t, reflect.DeepEqual(allocsLost, expected), "actual: %v, expected: %v", allocsLost, expected) + must.Eq(t, allocsLost, expected, must.Sprintf("actual: %v, expected: %v", allocsLost, expected)) } func TestTaskGroupUpdated_Restart(t *testing.T) {