Mirror of https://github.com/kemko/nomad.git (synced 2026-01-01 16:05:42 +03:00)
testing: migrate nomad/scheduler off of testify (#25968)
In the spirit of #25909, this PR removes the testify dependency from the scheduler package and also replaces the remaining reflect.DeepEqual comparisons. As before, the changes are a combination of semgrep rewrites and manual editing in hx.

Co-authored-by: Tim Gross <tgross@hashicorp.com>
Committed by GitHub
Parent: 34e96932a1
Commit: 648bacda77
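For readers unfamiliar with the two assertion libraries, the hunks below repeat one mechanical pattern: testify's require assertions, along with hand-rolled reflect.DeepEqual checks, are replaced by the equivalent helpers from github.com/shoenig/test/must, whose assertions generally take the expected value before the actual one. The sketch below is illustrative only and is not part of the diff; the helper name and variables are hypothetical.

package scheduler_example

import (
	"testing"

	"github.com/shoenig/test/must"
)

// checkNodes is a hypothetical helper used only to show the shape of the conversion.
func checkNodes(t *testing.T, got, want []string, err error) {
	// Before (testify / reflect):
	//   require.NoError(t, err)
	//   require.Len(t, got, 2)
	//   if !reflect.DeepEqual(got, want) {
	//       t.Fatalf("got %#v, want %#v", got, want)
	//   }
	//
	// After (shoenig/test/must); note the expected value comes first:
	must.NoError(t, err)
	must.SliceLen(t, 2, got)
	must.Eq(t, want, got)
}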
@@ -29,3 +29,4 @@ rules:
paths:
include:
- "nomad/state/*_test.go"
- "nomad/scheduler/*_test.go"

@@ -4,11 +4,11 @@
package scheduler

import (
"reflect"
"testing"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/shoenig/test/must"
)

func TestAnnotateTaskGroup_Updates(t *testing.T) {
@@ -50,9 +50,8 @@ func TestAnnotateTaskGroup_Updates(t *testing.T) {
t.Fatalf("annotateTaskGroup(%#v, %#v) failed: %#v", tgDiff, annotations, err)
}

if !reflect.DeepEqual(tgDiff, expected) {
t.Fatalf("got %#v, want %#v", tgDiff, expected)
}
must.Eq(t, expected, tgDiff)

}

func TestAnnotateCountChange_NonEdited(t *testing.T) {
@@ -61,9 +60,8 @@ func TestAnnotateCountChange_NonEdited(t *testing.T) {
tg := &structs.TaskGroupDiff{}
tgOrig := &structs.TaskGroupDiff{}
annotateCountChange(tg)
if !reflect.DeepEqual(tgOrig, tg) {
t.Fatalf("annotateCountChange(%#v) should not have caused any annotation: %#v", tgOrig, tg)
}
must.Eq(t, tgOrig, tg)

}

func TestAnnotateCountChange(t *testing.T) {
@@ -116,9 +114,8 @@ func TestAnnotateTask_NonEdited(t *testing.T) {
td := &structs.TaskDiff{Type: structs.DiffTypeNone}
tdOrig := &structs.TaskDiff{Type: structs.DiffTypeNone}
annotateTask(td, tgd)
if !reflect.DeepEqual(tdOrig, td) {
t.Fatalf("annotateTask(%#v) should not have caused any annotation: %#v", tdOrig, td)
}
must.Eq(t, tdOrig, td)

}

func TestAnnotateTask(t *testing.T) {

@@ -16,7 +16,7 @@ import (

"github.com/hashicorp/nomad/helper/raftutil"
"github.com/hashicorp/nomad/scheduler"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)

// NewBenchmarkingHarness creates a starting test harness with state
@@ -30,13 +30,13 @@ func NewBenchmarkingHarness(t testing.TB) *scheduler.Harness {
datadir := os.Getenv("NOMAD_BENCHMARK_DATADIR")
if datadir != "" {
h, err := NewHarnessFromDataDir(t, datadir)
require.NoError(t, err)
must.NoError(t, err)
return h
} else {
snapshotPath := os.Getenv("NOMAD_BENCHMARK_SNAPSHOT")
if snapshotPath != "" {
h, err := NewHarnessFromSnapshot(t, snapshotPath)
require.NoError(t, err)
must.NoError(t, err)
return h
}
}

@@ -16,7 +16,6 @@ import (
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/require"
)

func testContext(t testing.TB) (*state.StateStore, *EvalContext) {
@@ -160,9 +159,9 @@ func TestEvalContext_ProposedAlloc(t *testing.T) {
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}))
must.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}))

// Add a planned eviction to alloc1
plan := ctx.Plan()
@@ -299,10 +298,10 @@ func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) {
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(allocEvict.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPreempt.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPropose.JobID)))
require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{allocEvict, allocPreempt, allocPropose}))
must.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(allocEvict.JobID)))
must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPreempt.JobID)))
must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPropose.JobID)))
must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{allocEvict, allocPreempt, allocPropose}))

// Plan to evict one alloc and preempt another
plan := ctx.Plan()
@@ -310,8 +309,8 @@ func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) {
plan.NodeUpdate[nodes[0].Node.ID] = []*structs.Allocation{allocPreempt}

proposed, err := ctx.ProposedAllocs(nodes[0].Node.ID)
require.NoError(t, err)
require.Len(t, proposed, 1)
must.NoError(t, err)
must.SliceLen(t, 1, proposed)
}

func TestEvalEligibility_JobStatus(t *testing.T) {
@@ -431,7 +430,7 @@ func TestEvalEligibility_GetClasses(t *testing.T) {
}

actClasses := e.GetClasses()
require.Equal(t, expClasses, actClasses)
must.Eq(t, expClasses, actClasses)
}
func TestEvalEligibility_GetClasses_JobEligible_TaskGroupIneligible(t *testing.T) {
ci.Parallel(t)
@@ -455,7 +454,7 @@ func TestEvalEligibility_GetClasses_JobEligible_TaskGroupIneligible(t *testing.T
}

actClasses := e.GetClasses()
require.Equal(t, expClasses, actClasses)
must.Eq(t, expClasses, actClasses)
}

func TestPortCollisionEvent_Copy(t *testing.T) {
@@ -503,6 +502,6 @@ func TestPortCollisionEvent_Sanitize(t *testing.T) {
}

cleanEv := ev.Sanitize()
require.Empty(t, cleanEv.Node.SecretID)
require.Nil(t, cleanEv.Allocations[0].Job)
must.Eq(t, "", cleanEv.Node.SecretID)
must.Nil(t, cleanEv.Allocations[0].Job)
}

@@ -14,7 +14,6 @@ import (
"github.com/hashicorp/nomad/nomad/structs"
psstructs "github.com/hashicorp/nomad/plugins/shared/structs"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/require"
)

func anyMemoryNodeMatcher() *memoryNodeMatcher {
@@ -103,68 +102,64 @@ func collectInstanceIDs(devices ...*structs.NodeDeviceResource) []string {
func TestDeviceAllocator_Allocate_GenericRequest(t *testing.T) {
ci.Parallel(t)

require := require.New(t)
_, ctx := testContext(t)
n := devNode()
d := newDeviceAllocator(ctx, n)
require.NotNil(d)
must.NotNil(t, d)

// Build the request
ask := deviceRequest("gpu", 1, nil, nil)

mem := anyMemoryNodeMatcher()
out, score, err := d.createOffer(mem, ask)
require.NotNil(out)
require.Zero(score)
require.NoError(err)
must.NotNil(t, out)
must.Zero(t, score)
must.NoError(t, err)

// Check that we got the nvidia device
require.Len(out.DeviceIDs, 1)
require.Contains(collectInstanceIDs(n.NodeResources.Devices[0]), out.DeviceIDs[0])
must.SliceLen(t, 1, out.DeviceIDs)
must.SliceContains(t, collectInstanceIDs(n.NodeResources.Devices[0]), out.DeviceIDs[0])
}

// Test that asking for a device that is fully specified works.
func TestDeviceAllocator_Allocate_FullyQualifiedRequest(t *testing.T) {
ci.Parallel(t)

require := require.New(t)
_, ctx := testContext(t)
n := devNode()
d := newDeviceAllocator(ctx, n)
require.NotNil(d)
must.NotNil(t, d)

// Build the request
ask := deviceRequest("intel/fpga/F100", 1, nil, nil)

mem := anyMemoryNodeMatcher()
out, score, err := d.createOffer(mem, ask)
require.NotNil(out)
require.Zero(score)
require.NoError(err)
must.NotNil(t, out)
must.Zero(t, score)
must.NoError(t, err)

// Check that we got the nvidia device
require.Len(out.DeviceIDs, 1)
require.Contains(collectInstanceIDs(n.NodeResources.Devices[1]), out.DeviceIDs[0])
must.SliceLen(t, 1, out.DeviceIDs)
must.SliceContains(t, collectInstanceIDs(n.NodeResources.Devices[1]), out.DeviceIDs[0])
}

// Test that asking for a device with too much count doesn't place
func TestDeviceAllocator_Allocate_NotEnoughInstances(t *testing.T) {
ci.Parallel(t)

require := require.New(t)
_, ctx := testContext(t)
n := devNode()
d := newDeviceAllocator(ctx, n)
require.NotNil(d)
must.NotNil(t, d)

// Build the request
ask := deviceRequest("gpu", 4, nil, nil)

mem := anyMemoryNodeMatcher()
out, _, err := d.createOffer(mem, ask)
require.Nil(out)
require.Error(err)
require.Contains(err.Error(), "no devices match request")
must.Nil(t, out)
must.ErrorContains(t, err, "no devices match request")
}

func TestDeviceAllocator_Allocate_NUMA_available(t *testing.T) {
@@ -338,14 +333,14 @@ func TestDeviceAllocator_Allocate_Constraints(t *testing.T) {
mem := anyMemoryNodeMatcher()
out, score, err := d.createOffer(mem, ask)
if c.NoPlacement {
require.Nil(t, out)
must.Nil(t, out)
} else {
must.NotNil(t, out)
must.Zero(t, score)
must.NoError(t, err)

// Check that we got the right nvidia device instance, and
// specific device instance IDs if required
// specific device instance IDs if mustd
must.Len(t, 1, out.DeviceIDs)
must.SliceContains(t, collectInstanceIDs(c.ExpectedDevice), out.DeviceIDs[0])
must.SliceContainsSubset(t, c.ExpectedDeviceIDs, out.DeviceIDs)
@@ -434,27 +429,26 @@ func TestDeviceAllocator_Allocate_Affinities(t *testing.T) {

for _, c := range cases {
t.Run(c.Name, func(t *testing.T) {
require := require.New(t)
_, ctx := testContext(t)
d := newDeviceAllocator(ctx, n)
require.NotNil(d)
must.NotNil(t, d)

// Build the request
ask := deviceRequest(c.Name, 1, nil, c.Affinities)

mem := anyMemoryNodeMatcher()
out, score, err := d.createOffer(mem, ask)
require.NotNil(out)
require.NoError(err)
must.NotNil(t, out)
must.NoError(t, err)
if c.ZeroScore {
require.Zero(score)
must.Zero(t, score)
} else {
require.NotZero(score)
must.NonZero(t, score)
}

// Check that we got the nvidia device
require.Len(out.DeviceIDs, 1)
require.Contains(collectInstanceIDs(c.ExpectedDevice), out.DeviceIDs[0])
must.SliceLen(t, 1, out.DeviceIDs)
must.SliceContains(t, collectInstanceIDs(c.ExpectedDevice), out.DeviceIDs[0])
})
}
}

@@ -5,7 +5,6 @@ package scheduler

import (
"fmt"
"reflect"
"testing"
"time"

@@ -64,9 +63,8 @@ func TestStaticIterator_SetNodes(t *testing.T) {
static.SetNodes(newNodes)

out := collectFeasible(static)
if !reflect.DeepEqual(out, newNodes) {
t.Fatalf("bad: %#v", out)
}
must.Eq(t, newNodes, out)

}

func TestRandomIterator(t *testing.T) {
@@ -86,9 +84,8 @@ func TestRandomIterator(t *testing.T) {
if len(out) != len(nodes) {
t.Fatalf("missing nodes")
}
if reflect.DeepEqual(out, nodes) {
t.Fatalf("same order")
}
must.NotEq(t, nodes, out)

}

func TestHostVolumeChecker_Static(t *testing.T) {
@@ -1360,7 +1357,7 @@ func TestResolveConstraintTarget(t *testing.T) {
type tcase struct {
target string
node *structs.Node
val interface{}
val string
result bool
}
node := mock.Node()
@@ -1422,11 +1419,9 @@ func TestResolveConstraintTarget(t *testing.T) {

for _, tc := range cases {
res, ok := resolveTarget(tc.target, tc.node)
if ok != tc.result {
t.Fatalf("TC: %#v, Result: %v %v", tc, res, ok)
}
if ok && !reflect.DeepEqual(res, tc.val) {
t.Fatalf("TC: %#v, Result: %v %v", tc, res, ok)
must.Eq(t, ok, tc.result)
if ok {
must.Eq(t, res, tc.val)
}
}
}

File diff suppressed because it is too large
@@ -10,7 +10,7 @@ import (
"github.com/hashicorp/nomad/client/lib/numalib"
"github.com/hashicorp/nomad/client/lib/numalib/hw"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)

func TestCoreSelectorSelect(t *testing.T) {
@@ -46,7 +46,7 @@ func TestCoreSelectorSelect(t *testing.T) {
GuessSpeed: 0,
}
}
require.Equal(t, coreIds, []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47})
must.Eq(t, []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47}, coreIds)

selector := &coreSelector{
topology: &numalib.Topology{
@@ -88,8 +88,8 @@ func TestCoreSelectorSelect(t *testing.T) {
} {
t.Run(test.name, func(t *testing.T) {
ids, mhz := selector.Select(test.resources)
require.Equal(t, test.expectedIds, ids)
require.Equal(t, test.expectedMhz, mhz)
must.Eq(t, test.expectedIds, ids)
must.Eq(t, test.expectedMhz, mhz)
})
}
}

@@ -5,18 +5,15 @@ package scheduler

import (
"fmt"
"maps"
"strconv"
"testing"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/lib/numalib"
"github.com/hashicorp/nomad/client/lib/numalib/hw"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
psstructs "github.com/hashicorp/nomad/plugins/shared/structs"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)

func TestResourceDistance(t *testing.T) {
@@ -140,80 +137,14 @@ func TestResourceDistance(t *testing.T) {

for _, tc := range testCases {
t.Run("", func(t *testing.T) {
require := require.New(t)
actualDistance := fmt.Sprintf("%3.3f", basicResourceDistance(resourceAsk, tc.allocResource))
require.Equal(tc.expectedDistance, actualDistance)
must.Eq(t, tc.expectedDistance, actualDistance)
})

}

}

func makeNodeResources(devices []*structs.NodeDeviceResource, busAssociativity map[string]hw.NodeID) *structs.NodeResources {
makeCore := func(node hw.NodeID, id hw.CoreID) numalib.Core {
sockets := map[hw.NodeID]hw.SocketID{
0: 0,
1: 0,
2: 1,
3: 1,
}
return numalib.Core{
NodeID: node,
SocketID: sockets[node],
ID: id,
Grade: numalib.Performance,
BaseSpeed: 4000,
}
}

// 2 socket, 4 numa node system, 2 cores per node
processors := structs.NodeProcessorResources{
Topology: &numalib.Topology{
Nodes: []uint8{0, 1, 2, 3},
Distances: numalib.SLIT{
[]numalib.Cost{10, 12, 32, 32},
[]numalib.Cost{12, 10, 32, 32},
[]numalib.Cost{32, 32, 10, 12},
[]numalib.Cost{32, 32, 12, 10},
},
Cores: []numalib.Core{
makeCore(0, 0),
makeCore(0, 1),
makeCore(1, 2),
makeCore(1, 3),
makeCore(2, 4),
makeCore(2, 5),
makeCore(3, 6),
makeCore(3, 7),
},
},
}

defaultNodeResources := &structs.NodeResources{
Processors: processors,
Memory: structs.NodeMemoryResources{
MemoryMB: 8192,
},
Disk: structs.NodeDiskResources{
DiskMB: 100 * 1024,
},
Networks: []*structs.NetworkResource{
{
Device: "eth0",
CIDR: "192.168.0.100/32",
MBits: 1000,
},
},
Devices: devices,
}

defaultNodeResources.Compatibility()

defaultNodeResources.Processors.Topology.BusAssociativity = maps.Clone(busAssociativity)

return defaultNodeResources
}

func makeDeviceInstance(instanceID, busID string) *structs.NodeDevice {
return &structs.NodeDevice{
ID: instanceID,
@@ -1395,10 +1326,9 @@ func TestPreemption_Normal(t *testing.T) {
for _, alloc := range tc.currentAllocations {
alloc.NodeID = node.ID
}
require := require.New(t)
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, tc.currentAllocations)

require.Nil(err)
must.NoError(t, err)
if tc.currentPreemptions != nil {
ctx.plan.NodePreemptions[node.ID] = tc.currentPreemptions
}
@@ -1422,14 +1352,14 @@ func TestPreemption_Normal(t *testing.T) {
binPackIter.SetTaskGroup(taskGroup)
option := binPackIter.Next()
if tc.preemptedAllocIDs == nil {
require.Nil(option)
must.Nil(t, option)
} else {
require.NotNil(option)
must.NotNil(t, option)
preemptedAllocs := option.PreemptedAllocs
require.Equal(len(tc.preemptedAllocIDs), len(preemptedAllocs))
must.Eq(t, len(tc.preemptedAllocIDs), len(preemptedAllocs))
for _, alloc := range preemptedAllocs {
_, ok := tc.preemptedAllocIDs[alloc.ID]
require.Truef(ok, "alloc %s was preempted unexpectedly", alloc.ID)
must.True(t, ok, must.Sprintf("alloc %s was preempted unexpectedly", alloc.ID))
}
}
})
@@ -1502,7 +1432,7 @@ func TestPreemptionMultiple(t *testing.T) {
},
}

require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))

// low priority job with 4 allocs using all 4 GPUs
lowPrioJob := mock.Job()
@@ -1515,7 +1445,7 @@ func TestPreemptionMultiple(t *testing.T) {
Name: "gpu",
Count: 1,
}}
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, lowPrioJob))
must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, lowPrioJob))

allocs := []*structs.Allocation{}
allocIDs := map[string]struct{}{}
@@ -1531,7 +1461,7 @@ func TestPreemptionMultiple(t *testing.T) {
allocs = append(allocs, alloc)
allocIDs[alloc.ID] = struct{}{}
}
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
must.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))

// new high priority job with 2 allocs, each using 2 GPUs
highPrioJob := mock.Job()
@@ -1544,7 +1474,7 @@ func TestPreemptionMultiple(t *testing.T) {
Name: "gpu",
Count: 2,
}}
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, highPrioJob))
must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, highPrioJob))

// schedule
eval := &structs.Evaluation{
@@ -1555,18 +1485,18 @@ func TestPreemptionMultiple(t *testing.T) {
JobID: highPrioJob.ID,
Status: structs.EvalStatusPending,
}
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

// Process the evaluation
require.NoError(t, h.Process(NewServiceScheduler, eval))
require.Len(t, h.Plans, 1)
require.Contains(t, h.Plans[0].NodePreemptions, node.ID)
must.NoError(t, h.Process(NewServiceScheduler, eval))
must.Len(t, 1, h.Plans)
must.MapContainsKey(t, h.Plans[0].NodePreemptions, node.ID)

preempted := map[string]struct{}{}
for _, alloc := range h.Plans[0].NodePreemptions[node.ID] {
preempted[alloc.ID] = struct{}{}
}
require.Equal(t, allocIDs, preempted)
must.Eq(t, allocIDs, preempted)
}

// helper method to create allocations with given jobs and resources

@@ -16,7 +16,6 @@ import (
"github.com/hashicorp/nomad/nomad/structs"
"github.com/shoenig/test"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/require"
)

var testSchedulerConfig = &structs.SchedulerConfiguration{
@@ -255,15 +254,15 @@ func TestBinPackIterator_NoExistingAlloc_MixedReserve(t *testing.T) {
}

// 3 nodes should be feasible
require.Len(t, out, 3)
must.Len(t, 3, out)

// Node without reservations is the best fit
require.Equal(t, nodes[0].Node.Name, out[0].Node.Name)
must.Eq(t, nodes[0].Node.Name, out[0].Node.Name)

// Node with smallest remaining resources ("best fit") should get a
// higher score than node with more remaining resources ("worse fit")
require.Equal(t, nodes[1].Node.Name, out[1].Node.Name)
require.Equal(t, nodes[2].Node.Name, out[2].Node.Name)
must.Eq(t, nodes[1].Node.Name, out[1].Node.Name)
must.Eq(t, nodes[2].Node.Name, out[2].Node.Name)
}

// Tests bin packing iterator with network resources at task and task group level
@@ -367,27 +366,26 @@ func TestBinPackIterator_Network_Success(t *testing.T) {
scoreNorm := NewScoreNormalizationIterator(ctx, binp)

out := collectRanked(scoreNorm)
require := require.New(t)

// We expect both nodes to be eligible to place
require.Len(out, 2)
require.Equal(out[0], nodes[0])
require.Equal(out[1], nodes[1])
must.Len(t, 2, out)
must.Eq(t, nodes[0], out[0])
must.Eq(t, nodes[1], out[1])

// First node should have a perfect score
require.Equal(1.0, out[0].FinalScore)
must.Eq(t, 1.0, out[0].FinalScore)

if out[1].FinalScore < 0.50 || out[1].FinalScore > 0.60 {
t.Fatalf("Bad Score: %v", out[1].FinalScore)
}

// Verify network information at taskgroup level
require.Equal(500, out[0].AllocResources.Networks[0].MBits)
require.Equal(500, out[1].AllocResources.Networks[0].MBits)
must.Eq(t, 500, out[0].AllocResources.Networks[0].MBits)
must.Eq(t, 500, out[1].AllocResources.Networks[0].MBits)

// Verify network information at task level
require.Equal(300, out[0].TaskResources["web"].Networks[0].MBits)
require.Equal(300, out[1].TaskResources["web"].Networks[0].MBits)
must.Eq(t, 300, out[0].TaskResources["web"].Networks[0].MBits)
must.Eq(t, 300, out[1].TaskResources["web"].Networks[0].MBits)
}

// Tests that bin packing iterator fails due to overprovisioning of network
@@ -499,12 +497,11 @@ func TestBinPackIterator_Network_Failure(t *testing.T) {
scoreNorm := NewScoreNormalizationIterator(ctx, binp)

out := collectRanked(scoreNorm)
require := require.New(t)

// We expect a placement failure because we need 800 mbits of network
// and only 300 is free
require.Len(out, 0)
require.Equal(1, ctx.metrics.DimensionExhausted["network: bandwidth exceeded"])
must.Len(t, 0, out)
must.Eq(t, 1, ctx.metrics.DimensionExhausted["network: bandwidth exceeded"])
}

func TestBinPackIterator_Network_NoCollision_Node(t *testing.T) {
@@ -595,7 +592,7 @@ func TestBinPackIterator_Network_NoCollision_Node(t *testing.T) {

// Placement should succeed since reserved ports are merged instead of
// treating them as a collision
require.Len(t, out, 1)
must.Len(t, 1, out)
}

// TestBinPackIterator_Network_NodeError asserts that NetworkIndex.SetNode can
@@ -694,9 +691,9 @@ func TestBinPackIterator_Network_NodeError(t *testing.T) {

// We expect a placement failure because the node has invalid reserved
// ports
require.Len(t, out, 0)
require.Equal(t, 1, ctx.metrics.DimensionExhausted["network: invalid node"],
ctx.metrics.DimensionExhausted)
must.Len(t, 0, out)
must.Eq(t, 1, ctx.metrics.DimensionExhausted["network: invalid node"],
must.Sprint(ctx.metrics.DimensionExhausted))
}

func TestBinPackIterator_Network_PortCollision_Alloc(t *testing.T) {
@@ -786,9 +783,9 @@ func TestBinPackIterator_Network_PortCollision_Alloc(t *testing.T) {
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}))
must.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}))

taskGroup := &structs.TaskGroup{
EphemeralDisk: &structs.EphemeralDisk{},
@@ -820,8 +817,8 @@ func TestBinPackIterator_Network_PortCollision_Alloc(t *testing.T) {
out := collectRanked(scoreNorm)

// We expect a placement failure due to port collision.
require.Len(t, out, 0)
require.Equal(t, 1, ctx.metrics.DimensionExhausted["network: port collision"])
must.Len(t, 0, out)
must.Eq(t, 1, ctx.metrics.DimensionExhausted["network: port collision"])
}

// Tests bin packing iterator with host network interpolation of task group level ports configuration
@@ -961,18 +958,17 @@ func TestBinPackIterator_Network_Interpolation_Success(t *testing.T) {
scoreNorm := NewScoreNormalizationIterator(ctx, binp)

out := collectRanked(scoreNorm)
require := require.New(t)

// We expect both nodes to be eligible to place
require.Len(out, 2)
require.Equal(out[0], nodes[0])
require.Equal(out[1], nodes[1])
must.Len(t, 2, out)
must.Eq(t, out[0], nodes[0])
must.Eq(t, out[1], nodes[1])

// Verify network information at taskgroup level
require.Contains([]string{"public", "private"}, out[0].AllocResources.Networks[0].DynamicPorts[0].HostNetwork)
require.Contains([]string{"public", "private"}, out[0].AllocResources.Networks[0].DynamicPorts[1].HostNetwork)
require.Contains([]string{"first", "second"}, out[1].AllocResources.Networks[0].DynamicPorts[0].HostNetwork)
require.Contains([]string{"first", "second"}, out[1].AllocResources.Networks[0].DynamicPorts[1].HostNetwork)
must.SliceContains(t, []string{"public", "private"}, out[0].AllocResources.Networks[0].DynamicPorts[0].HostNetwork)
must.SliceContains(t, []string{"public", "private"}, out[0].AllocResources.Networks[0].DynamicPorts[1].HostNetwork)
must.SliceContains(t, []string{"first", "second"}, out[1].AllocResources.Networks[0].DynamicPorts[0].HostNetwork)
must.SliceContains(t, []string{"first", "second"}, out[1].AllocResources.Networks[0].DynamicPorts[1].HostNetwork)
}

// Tests that bin packing iterator fails due to absence of meta value
@@ -1072,8 +1068,7 @@ func TestBinPackIterator_Host_Network_Interpolation_Absent_Value(t *testing.T) {
scoreNorm := NewScoreNormalizationIterator(ctx, binp)

out := collectRanked(scoreNorm)
require := require.New(t)
require.Len(out, 0)
must.Len(t, 0, out)
}

// Tests that bin packing iterator fails due to absence of meta value
@@ -1173,8 +1168,7 @@ func TestBinPackIterator_Host_Network_Interpolation_Interface_Not_Exists(t *test
scoreNorm := NewScoreNormalizationIterator(ctx, binp)

out := collectRanked(scoreNorm)
require := require.New(t)
require.Len(out, 0)
must.Len(t, 0, out)
}

func TestBinPackIterator_PlannedAlloc(t *testing.T) {
@@ -1377,9 +1371,9 @@ func TestBinPackIterator_ReservedCores(t *testing.T) {
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}))
must.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}))

taskGroup := &structs.TaskGroup{
EphemeralDisk: &structs.EphemeralDisk{},
@@ -1403,10 +1397,9 @@ func TestBinPackIterator_ReservedCores(t *testing.T) {
scoreNorm := NewScoreNormalizationIterator(ctx, binp)

out := collectRanked(scoreNorm)
require := require.New(t)
require.Len(out, 1)
require.Equal(nodes[1].Node.ID, out[0].Node.ID)
require.Equal([]uint16{1}, out[0].TaskResources["web"].Cpu.ReservedCores)
must.Len(t, 1, out)
must.Eq(t, nodes[1].Node.ID, out[0].Node.ID)
must.Eq(t, []uint16{1}, out[0].TaskResources["web"].Cpu.ReservedCores)
}

func TestBinPackIterator_ExistingAlloc(t *testing.T) {
@@ -1489,9 +1482,9 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) {
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}))
must.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}))

taskGroup := &structs.TaskGroup{
EphemeralDisk: &structs.EphemeralDisk{},
@@ -1603,9 +1596,9 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) {
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}))
must.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}))

// Add a planned eviction to alloc1
plan := ctx.Plan()
@@ -2319,13 +2312,12 @@ func TestNodeAntiAffinity_PenaltyNodes(t *testing.T) {

out := collectRanked(scoreNorm)

require := require.New(t)
require.Equal(2, len(out))
require.Equal(node1.ID, out[0].Node.ID)
require.Equal(-1.0, out[0].FinalScore)
must.Eq(t, 2, len(out))
must.Eq(t, node1.ID, out[0].Node.ID)
must.Eq(t, -1.0, out[0].FinalScore)

require.Equal(node2.ID, out[1].Node.ID)
require.Equal(0.0, out[1].FinalScore)
must.Eq(t, node2.ID, out[1].Node.ID)
must.Eq(t, 0.0, out[1].FinalScore)

}

@@ -2383,15 +2375,14 @@ func TestScoreNormalizationIterator(t *testing.T) {
scoreNorm := NewScoreNormalizationIterator(ctx, nodeReschedulePenaltyIter)

out := collectRanked(scoreNorm)
require := require.New(t)

require.Equal(2, len(out))
require.Equal(out[0], nodes[0])
must.Eq(t, 2, len(out))
must.Eq(t, nodes[0], out[0])
// Score should be averaged between both scorers
// -0.75 from job anti affinity and -1 from node rescheduling penalty
require.Equal(-0.875, out[0].FinalScore)
require.Equal(out[1], nodes[1])
require.Equal(out[1].FinalScore, 0.0)
must.Eq(t, -0.875, out[0].FinalScore)
must.Eq(t, nodes[1], out[1])
must.Eq(t, 0.0, out[1].FinalScore)
}

func TestNodeAffinityIterator(t *testing.T) {

@@ -5,7 +5,6 @@ package scheduler

import (
"fmt"
"reflect"
"regexp"
"strconv"
"testing"
@@ -277,10 +276,9 @@ func assertResults(t *testing.T, r *reconcileResults, exp *resultExpectation) {
} else if exp.createDeployment != nil && r.deployment != nil {
// Clear the deployment ID
r.deployment.ID, exp.createDeployment.ID = "", ""
if !reflect.DeepEqual(r.deployment, exp.createDeployment) {
t.Errorf("Unexpected createdDeployment; got\n %#v\nwant\n%#v\nDiff: %v",
r.deployment, exp.createDeployment, pretty.Diff(r.deployment, exp.createDeployment))
}
must.Eq(t, exp.createDeployment, r.deployment, must.Sprintf(
"Unexpected createdDeployment; got\n %#v\nwant\n%#v\nDiff: %v",
r.deployment, exp.createDeployment, pretty.Diff(r.deployment, exp.createDeployment)))
}

test.Eq(t, exp.deploymentUpdates, r.deploymentUpdates, test.Sprint("Expected Deployment Updates"))

File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -9,7 +9,7 @@ import (
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)

func TestLimitIterator(t *testing.T) {
@@ -313,12 +313,11 @@ func TestLimitIterator_ScoreThreshold(t *testing.T) {
limit := NewLimitIterator(ctx, static, 1, 0, 2)
limit.SetLimit(2)
out := collectRanked(limit)
require := require.New(t)
require.Equal(tc.expectedOut, out)
must.Eq(t, tc.expectedOut, out)

limit.Reset()
require.Equal(0, limit.skippedNodeIndex)
require.Equal(0, len(limit.skippedNodes))
must.Eq(t, 0, limit.skippedNodeIndex)
must.Eq(t, 0, len(limit.skippedNodes))
})
}

@@ -13,7 +13,6 @@ import (

"github.com/shoenig/test"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/require"

"github.com/hashicorp/go-set/v3"
"github.com/hashicorp/nomad/ci"
@@ -100,7 +99,7 @@ func TestSpreadIterator_SingleAttribute(t *testing.T) {
"dc2": 0.5,
}
for _, rn := range out {
require.Equal(t, expectedScores[rn.Node.Datacenter], rn.FinalScore)
must.Eq(t, expectedScores[rn.Node.Datacenter], rn.FinalScore)
}

// Update the plan to add more allocs to nodes in dc1
@@ -179,7 +178,7 @@ func TestSpreadIterator_SingleAttribute(t *testing.T) {
"dc2": 0.5,
}
for _, rn := range out {
require.Equal(t, expectedScores[rn.Node.Datacenter], rn.FinalScore)
must.Eq(t, expectedScores[rn.Node.Datacenter], rn.FinalScore)
}
}

@@ -281,7 +280,7 @@ func TestSpreadIterator_MultipleAttributes(t *testing.T) {
nodes[3].Node.ID: 0.556,
}
for _, rn := range out {
require.Equal(t, fmt.Sprintf("%.3f", expectedScores[rn.Node.ID]), fmt.Sprintf("%.3f", rn.FinalScore))
must.Eq(t, fmt.Sprintf("%.3f", expectedScores[rn.Node.ID]), fmt.Sprintf("%.3f", rn.FinalScore))
}

}
@@ -328,7 +327,7 @@ func TestSpreadIterator_EvenSpread(t *testing.T) {
"dc2": 0,
}
for _, rn := range out {
require.Equal(t, fmt.Sprintf("%.3f", expectedScores[rn.Node.Datacenter]), fmt.Sprintf("%.3f", rn.FinalScore))
must.Eq(t, fmt.Sprintf("%.3f", expectedScores[rn.Node.Datacenter]), fmt.Sprintf("%.3f", rn.FinalScore))

}

@@ -374,7 +373,7 @@ func TestSpreadIterator_EvenSpread(t *testing.T) {
"dc2": 1,
}
for _, rn := range out {
require.Equal(t, expectedScores[rn.Node.Datacenter], rn.FinalScore)
must.Eq(t, expectedScores[rn.Node.Datacenter], rn.FinalScore)
}

// Update the plan to add more allocs to nodes in dc2
@@ -427,7 +426,7 @@ func TestSpreadIterator_EvenSpread(t *testing.T) {
"dc2": -0.5,
}
for _, rn := range out {
require.Equal(t, fmt.Sprintf("%3.3f", expectedScores[rn.Node.Datacenter]), fmt.Sprintf("%3.3f", rn.FinalScore))
must.Eq(t, fmt.Sprintf("%3.3f", expectedScores[rn.Node.Datacenter]), fmt.Sprintf("%3.3f", rn.FinalScore))
}

// Add another node in dc3
@@ -470,7 +469,7 @@ func TestSpreadIterator_EvenSpread(t *testing.T) {
"dc3": 1,
}
for _, rn := range out {
require.Equal(t, fmt.Sprintf("%.3f", expectedScores[rn.Node.Datacenter]), fmt.Sprintf("%.3f", rn.FinalScore))
must.Eq(t, fmt.Sprintf("%.3f", expectedScores[rn.Node.Datacenter]), fmt.Sprintf("%.3f", rn.FinalScore))
}

}
@@ -525,7 +524,7 @@ func TestSpreadIterator_MaxPenalty(t *testing.T) {

// All nodes are in dc3 so score should be -1
for _, rn := range out {
require.Equal(t, -1.0, rn.FinalScore)
must.Eq(t, -1.0, rn.FinalScore)
}

// Reset scores
@@ -560,7 +559,7 @@ func TestSpreadIterator_MaxPenalty(t *testing.T) {

// All nodes don't have the spread attribute so score should be -1
for _, rn := range out {
require.Equal(t, -1.0, rn.FinalScore)
must.Eq(t, -1.0, rn.FinalScore)
}

}
@@ -685,8 +684,8 @@ func Test_evenSpreadScoreBoost(t *testing.T) {
Datacenter: "dc2",
}
boost := evenSpreadScoreBoost(pset, opt)
require.False(t, math.IsInf(boost, 1))
require.Equal(t, 1.0, boost)
must.False(t, math.IsInf(boost, 1))
must.Eq(t, 1.0, boost)
}

// TestSpreadOnLargeCluster exercises potentially quadratic
@@ -759,20 +758,20 @@ func TestSpreadOnLargeCluster(t *testing.T) {
ci.Parallel(t)
h := NewHarness(t)
err := upsertNodes(h, tc.nodeCount, tc.racks)
require.NoError(t, err)
must.NoError(t, err)
job := generateJob(tc.allocs)
eval, err := upsertJob(h, job)
require.NoError(t, err)
must.NoError(t, err)

start := time.Now()
err = h.Process(NewServiceScheduler, eval)
require.NoError(t, err)
require.LessOrEqual(t, time.Since(start), time.Duration(60*time.Second),
"time to evaluate exceeded EvalNackTimeout")
must.NoError(t, err)
must.LessEq(t, time.Duration(60*time.Second), time.Since(start),
must.Sprint("time to evaluate exceeded EvalNackTimeout"))

require.Len(t, h.Plans, 1)
require.False(t, h.Plans[0].IsNoOp())
require.NoError(t, validateEqualSpread(h))
must.Len(t, 1, h.Plans)
must.False(t, h.Plans[0].IsNoOp())
must.NoError(t, validateEqualSpread(h))
})
}
}
@@ -940,7 +939,7 @@ func TestSpreadPanicDowngrade(t *testing.T) {
nodes = append(nodes, node)
err := h.State.UpsertNode(structs.MsgTypeTestSetup,
h.NextIndex(), node)
require.NoError(t, err)
must.NoError(t, err)
}

// job version 1
@@ -974,7 +973,7 @@ func TestSpreadPanicDowngrade(t *testing.T) {
job1.Version = 1
job1.TaskGroups[0].Count = 5
err := h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1)
require.NoError(t, err)
must.NoError(t, err)

allocs := []*structs.Allocation{}
for i := 0; i < 4; i++ {
@@ -997,7 +996,7 @@ func TestSpreadPanicDowngrade(t *testing.T) {
allocs = append(allocs, alloc)
}
err = h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)
require.NoError(t, err)
must.NoError(t, err)

// job version 2
// max_parallel = 0, canary = 1, spread == nil
@@ -1006,7 +1005,7 @@ func TestSpreadPanicDowngrade(t *testing.T) {
job2.Version = 2
job2.Spreads = nil
err = h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)
require.NoError(t, err)
must.NoError(t, err)

eval := &structs.Evaluation{
Namespace: job2.Namespace,
@@ -1018,11 +1017,11 @@ func TestSpreadPanicDowngrade(t *testing.T) {
}
err = h.State.UpsertEvals(structs.MsgTypeTestSetup,
h.NextIndex(), []*structs.Evaluation{eval})
require.NoError(t, err)
must.NoError(t, err)

processErr := h.Process(NewServiceScheduler, eval)
require.NoError(t, processErr, "failed to process eval")
require.Len(t, h.Plans, 1)
must.NoError(t, processErr, must.Sprintf("..."))
must.Len(t, 1, h.Plans)
}

func TestSpread_ImplicitTargets(t *testing.T) {

@@ -5,7 +5,6 @@ package scheduler

import (
"fmt"
"reflect"
"runtime"
"testing"

@@ -82,9 +81,8 @@ func TestServiceStack_SetNodes(t *testing.T) {
}

out := collectFeasible(stack.source)
if !reflect.DeepEqual(out, nodes) {
t.Fatalf("bad: %#v", out)
}
must.Eq(t, nodes, out)

}

func TestServiceStack_SetJob(t *testing.T) {
@@ -99,9 +97,7 @@ func TestServiceStack_SetJob(t *testing.T) {
if stack.binPack.priority != job.Priority {
t.Fatalf("bad")
}
if !reflect.DeepEqual(stack.jobConstraint.constraints, job.Constraints) {
t.Fatalf("bad")
}
must.Eq(t, stack.jobConstraint.constraints, job.Constraints)
}

func TestServiceStack_Select_Size(t *testing.T) {

@@ -9,13 +9,12 @@ import (
"testing"
"time"

"github.com/stretchr/testify/require"

"github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-version"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/shoenig/test/must"
)

// RejectPlan is used to always reject the entire plan and force a state refresh
@@ -306,9 +305,9 @@ func (h *Harness) Process(factory Factory, eval *structs.Evaluation) error {
}

func (h *Harness) AssertEvalStatus(t testing.TB, state string) {
require.Len(t, h.Evals, 1)
must.Len(t, 1, h.Evals)
update := h.Evals[0]
require.Equal(t, state, update.Status)
must.Eq(t, state, update.Status)
}

func (h *Harness) SetNoSubmit() {

@@ -4,7 +4,6 @@
package scheduler

import (
"reflect"
"testing"
"time"

@@ -16,7 +15,6 @@ import (
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/require"
)

func BenchmarkTasksUpdated(b *testing.B) {
@@ -136,8 +134,8 @@ func TestRetryMax(t *testing.T) {
return false, nil
}
err := retryMax(3, bad, nil)
require.Error(t, err)
require.Equal(t, 3, calls, "mis match")
must.Error(t, err)
must.Eq(t, 3, calls)

calls = 0
first := true
@@ -149,8 +147,8 @@ func TestRetryMax(t *testing.T) {
return false
}
err = retryMax(3, bad, reset)
require.Error(t, err)
require.Equal(t, 6, calls, "mis match")
must.Error(t, err)
must.Eq(t, 6, calls)

calls = 0
good := func() (bool, error) {
@@ -158,8 +156,8 @@ func TestRetryMax(t *testing.T) {
return true, nil
}
err = retryMax(3, good, nil)
require.NoError(t, err)
require.Equal(t, 1, calls, "mis match")
must.NoError(t, err)
must.Eq(t, 1, calls)
}

func TestTaintedNodes(t *testing.T) {
@@ -173,10 +171,10 @@ func TestTaintedNodes(t *testing.T) {
node3.Datacenter = "dc2"
node3.Status = structs.NodeStatusDown
node4 := mock.DrainNode()
require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, node1))
require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1001, node2))
require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1002, node3))
require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1003, node4))
must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, node1))
must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1001, node2))
must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1002, node3))
must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1003, node4))

allocs := []*structs.Allocation{
{NodeID: node1.ID},
@@ -186,19 +184,19 @@ func TestTaintedNodes(t *testing.T) {
{NodeID: "12345678-abcd-efab-cdef-123456789abc"},
}
tainted, err := taintedNodes(state, allocs)
require.NoError(t, err)
require.Equal(t, 3, len(tainted))
require.NotContains(t, tainted, node1.ID)
require.NotContains(t, tainted, node2.ID)
must.NoError(t, err)
must.Eq(t, 3, len(tainted))
must.MapNotContainsKey(t, tainted, node1.ID)
must.MapNotContainsKey(t, tainted, node2.ID)

require.Contains(t, tainted, node3.ID)
require.NotNil(t, tainted[node3.ID])
must.MapContainsKey(t, tainted, node3.ID)
must.NotNil(t, tainted[node3.ID])

require.Contains(t, tainted, node4.ID)
require.NotNil(t, tainted[node4.ID])
must.MapContainsKey(t, tainted, node4.ID)
must.NotNil(t, tainted[node4.ID])

require.Contains(t, tainted, "12345678-abcd-efab-cdef-123456789abc")
require.Nil(t, tainted["12345678-abcd-efab-cdef-123456789abc"])
must.MapContainsKey(t, tainted, "12345678-abcd-efab-cdef-123456789abc")
must.Nil(t, tainted["12345678-abcd-efab-cdef-123456789abc"])
}

func TestShuffleNodes(t *testing.T) {
@@ -223,13 +221,13 @@ func TestShuffleNodes(t *testing.T) {
eval := mock.Eval() // will have random EvalID
plan := eval.MakePlan(mock.Job())
shuffleNodes(plan, 1000, nodes)
require.False(t, reflect.DeepEqual(nodes, orig))
must.NotEq(t, nodes, orig)

nodes2 := make([]*structs.Node, len(nodes))
copy(nodes2, orig)
shuffleNodes(plan, 1000, nodes2)

require.True(t, reflect.DeepEqual(nodes, nodes2))
must.Eq(t, nodes, nodes2)

}

@@ -602,58 +600,58 @@ func TestSetStatus(t *testing.T) {
eval := mock.Eval()
status := "a"
desc := "b"
require.NoError(t, setStatus(logger, h, eval, nil, nil, nil, status, desc, nil, ""))
require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)
must.NoError(t, setStatus(logger, h, eval, nil, nil, nil, status, desc, nil, ""))
must.Eq(t, 1, len(h.Evals), must.Sprintf("setStatus() didn't update plan: %v", h.Evals))

newEval := h.Evals[0]
require.True(t, newEval.ID == eval.ID && newEval.Status == status && newEval.StatusDescription == desc,
"setStatus() submited invalid eval: %v", newEval)
must.True(t, newEval.ID == eval.ID && newEval.Status == status && newEval.StatusDescription == desc,
must.Sprintf("setStatus() submited invalid eval: %v", newEval))

// Test next evals
h = NewHarness(t)
next := mock.Eval()
require.NoError(t, setStatus(logger, h, eval, next, nil, nil, status, desc, nil, ""))
require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)
must.NoError(t, setStatus(logger, h, eval, next, nil, nil, status, desc, nil, ""))
must.Eq(t, 1, len(h.Evals), must.Sprintf("setStatus() didn't update plan: %v", h.Evals))

newEval = h.Evals[0]
require.Equal(t, next.ID, newEval.NextEval, "setStatus() didn't set nextEval correctly: %v", newEval)
must.Eq(t, next.ID, newEval.NextEval, must.Sprintf("setStatus() didn't set nextEval correctly: %v", newEval))

// Test blocked evals
h = NewHarness(t)
blocked := mock.Eval()
require.NoError(t, setStatus(logger, h, eval, nil, blocked, nil, status, desc, nil, ""))
require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)
must.NoError(t, setStatus(logger, h, eval, nil, blocked, nil, status, desc, nil, ""))
must.Eq(t, 1, len(h.Evals), must.Sprintf("setStatus() didn't update plan: %v", h.Evals))

newEval = h.Evals[0]
require.Equal(t, blocked.ID, newEval.BlockedEval, "setStatus() didn't set BlockedEval correctly: %v", newEval)
must.Eq(t, blocked.ID, newEval.BlockedEval, must.Sprintf("setStatus() didn't set BlockedEval correctly: %v", newEval))

// Test metrics
h = NewHarness(t)
metrics := map[string]*structs.AllocMetric{"foo": nil}
require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, nil, ""))
require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)
must.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, nil, ""))
must.Eq(t, 1, len(h.Evals), must.Sprintf("setStatus() didn't update plan: %v", h.Evals))

newEval = h.Evals[0]
require.True(t, reflect.DeepEqual(newEval.FailedTGAllocs, metrics),
"setStatus() didn't set failed task group metrics correctly: %v", newEval)
must.Eq(t, newEval.FailedTGAllocs, metrics,
must.Sprintf("setStatus() didn't set failed task group metrics correctly: %v", newEval))

// Test queued allocations
h = NewHarness(t)
queuedAllocs := map[string]int{"web": 1}

require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, ""))
require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)
must.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, ""))
must.Eq(t, 1, len(h.Evals), must.Sprintf("setStatus() didn't update plan: %v", h.Evals))

newEval = h.Evals[0]
require.True(t, reflect.DeepEqual(newEval.QueuedAllocations, queuedAllocs), "setStatus() didn't set failed task group metrics correctly: %v", newEval)
must.Eq(t, newEval.QueuedAllocations, queuedAllocs, must.Sprintf("setStatus() didn't set failed task group metrics correctly: %v", newEval))

h = NewHarness(t)
dID := uuid.Generate()
require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, dID))
require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)
must.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, dID))
must.Eq(t, 1, len(h.Evals), must.Sprintf("setStatus() didn't update plan: %v", h.Evals))

newEval = h.Evals[0]
require.Equal(t, dID, newEval.DeploymentID, "setStatus() didn't set deployment id correctly: %v", newEval)
must.Eq(t, dID, newEval.DeploymentID, must.Sprintf("setStatus() didn't set deployment id correctly: %v", newEval))
}

func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
@@ -664,7 +662,7 @@ func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
job := mock.Job()

node := mock.Node()
require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))
must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))

// Register an alloc
alloc := &structs.Allocation{
@@ -690,8 +688,8 @@ func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
TaskGroup: "web",
}
alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))
must.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))

// Create a new task group that prevents in-place updates.
tg := &structs.TaskGroup{}
@@ -709,8 +707,8 @@ func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
// Do the inplace update.
unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

require.True(t, len(unplaced) == 1 && len(inplace) == 0, "inplaceUpdate incorrectly did an inplace update")
require.Empty(t, ctx.plan.NodeAllocation, "inplaceUpdate incorrectly did an inplace update")
must.True(t, len(unplaced) == 1 && len(inplace) == 0, must.Sprint("inplaceUpdate incorrectly did an inplace update"))
must.MapEmpty(t, ctx.plan.NodeAllocation, must.Sprint("inplaceUpdate incorrectly did an inplace update"))
}

func TestInplaceUpdate_AllocatedResources(t *testing.T) {
@@ -721,7 +719,7 @@ func TestInplaceUpdate_AllocatedResources(t *testing.T) {
job := mock.Job()

node := mock.Node()
require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))
must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))

// Register an alloc
alloc := &structs.Allocation{
@@ -746,8 +744,8 @@ func TestInplaceUpdate_AllocatedResources(t *testing.T) {
TaskGroup: "web",
}
alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))
must.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))

// Update TG to add a new service (inplace)
tg := job.TaskGroups[0]
@@ -763,13 +761,13 @@ func TestInplaceUpdate_AllocatedResources(t *testing.T) {
// Do the inplace update.
unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

require.True(t, len(unplaced) == 0 && len(inplace) == 1, "inplaceUpdate incorrectly did not perform an inplace update")
require.NotEmpty(t, ctx.plan.NodeAllocation, "inplaceUpdate incorrectly did an inplace update")
require.NotEmpty(t, ctx.plan.NodeAllocation[node.ID][0].AllocatedResources.Shared.Ports)
must.True(t, len(unplaced) == 0 && len(inplace) == 1, must.Sprint("inplaceUpdate incorrectly did not perform an inplace update"))
must.MapNotEmpty(t, ctx.plan.NodeAllocation, must.Sprint("inplaceUpdate incorrectly did an inplace update"))
must.SliceNotEmpty(t, ctx.plan.NodeAllocation[node.ID][0].AllocatedResources.Shared.Ports)

port, ok := ctx.plan.NodeAllocation[node.ID][0].AllocatedResources.Shared.Ports.Get("api-port")
require.True(t, ok)
require.Equal(t, 19910, port.Value)
must.True(t, ok)
must.Eq(t, 19910, port.Value)
}

func TestInplaceUpdate_NoMatch(t *testing.T) {
@@ -780,7 +778,7 @@ func TestInplaceUpdate_NoMatch(t *testing.T) {
job := mock.Job()

node := mock.Node()
require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))
must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))

// Register an alloc
alloc := &structs.Allocation{
@@ -806,8 +804,8 @@ func TestInplaceUpdate_NoMatch(t *testing.T) {
TaskGroup: "web",
}
alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))
must.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))

// Create a new task group that requires too much resources.
tg := &structs.TaskGroup{}
@@ -821,8 +819,8 @@ func TestInplaceUpdate_NoMatch(t *testing.T) {
// Do the inplace update.
unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

require.True(t, len(unplaced) == 1 && len(inplace) == 0, "inplaceUpdate incorrectly did an inplace update")
require.Empty(t, ctx.plan.NodeAllocation, "inplaceUpdate incorrectly did an inplace update")
must.True(t, len(unplaced) == 1 && len(inplace) == 0, must.Sprint("inplaceUpdate incorrectly did an inplace update"))
must.MapEmpty(t, ctx.plan.NodeAllocation, must.Sprint("inplaceUpdate incorrectly did an inplace update"))
}

func TestInplaceUpdate_Success(t *testing.T) {
@@ -833,7 +831,7 @@ func TestInplaceUpdate_Success(t *testing.T) {
job := mock.Job()

node := mock.Node()
require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))
must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))

// Register an alloc
alloc := &structs.Allocation{
@@ -859,8 +857,8 @@ func TestInplaceUpdate_Success(t *testing.T) {
DesiredStatus: structs.AllocDesiredStatusRun,
}
alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
|
||||
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)))
|
||||
require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))
|
||||
must.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)))
|
||||
must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))
|
||||
|
||||
// Create a new task group that updates the resources.
|
||||
tg := &structs.TaskGroup{}
|
||||
@@ -891,23 +889,23 @@ func TestInplaceUpdate_Success(t *testing.T) {
|
||||
// Do the inplace update.
|
||||
unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)
|
||||
|
||||
require.True(t, len(unplaced) == 0 && len(inplace) == 1, "inplaceUpdate did not do an inplace update")
|
||||
require.Equal(t, 1, len(ctx.plan.NodeAllocation), "inplaceUpdate did not do an inplace update")
|
||||
require.Equal(t, alloc.ID, inplace[0].Alloc.ID, "inplaceUpdate returned the wrong, inplace updated alloc: %#v", inplace)
|
||||
must.True(t, len(unplaced) == 0 && len(inplace) == 1, must.Sprint("inplaceUpdate did not do an inplace update"))
|
||||
must.Eq(t, 1, len(ctx.plan.NodeAllocation), must.Sprint("inplaceUpdate did not do an inplace update"))
|
||||
must.Eq(t, alloc.ID, inplace[0].Alloc.ID, must.Sprintf("inplaceUpdate returned the wrong, inplace updated alloc: %#v", inplace))
|
||||
|
||||
// Get the alloc we inserted.
|
||||
a := inplace[0].Alloc // TODO(sean@): Verify this is correct vs: ctx.plan.NodeAllocation[alloc.NodeID][0]
|
||||
require.NotNil(t, a.Job)
|
||||
require.Equal(t, 1, len(a.Job.TaskGroups))
|
||||
require.Equal(t, 1, len(a.Job.TaskGroups[0].Tasks))
|
||||
require.Equal(t, 3, len(a.Job.TaskGroups[0].Tasks[0].Services),
|
||||
"Expected number of services: %v, Actual: %v", 3, len(a.Job.TaskGroups[0].Tasks[0].Services))
|
||||
must.NotNil(t, a.Job)
|
||||
must.Eq(t, 1, len(a.Job.TaskGroups))
|
||||
must.Eq(t, 1, len(a.Job.TaskGroups[0].Tasks))
|
||||
must.Eq(t, 3, len(a.Job.TaskGroups[0].Tasks[0].Services), must.Sprintf(
|
||||
"Expected number of services: %v, Actual: %v", 3, len(a.Job.TaskGroups[0].Tasks[0].Services)))
|
||||
|
||||
serviceNames := make(map[string]struct{}, 3)
|
||||
for _, consulService := range a.Job.TaskGroups[0].Tasks[0].Services {
|
||||
serviceNames[consulService.Name] = struct{}{}
|
||||
}
|
||||
require.Equal(t, 3, len(serviceNames))
|
||||
must.Eq(t, 3, len(serviceNames))
|
||||
|
||||
for _, name := range []string{"dummy-service", "dummy-service2", "web-frontend"} {
|
||||
if _, found := serviceNames[name]; !found {
|
||||
@@ -1051,23 +1049,23 @@ func TestUtil_connectSidecarServiceUpdated(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
t.Run("both nil", func(t *testing.T) {
|
||||
require.False(t, connectSidecarServiceUpdated(nil, nil).modified)
|
||||
must.False(t, connectSidecarServiceUpdated(nil, nil).modified)
|
||||
})
|
||||
|
||||
t.Run("one nil", func(t *testing.T) {
|
||||
require.True(t, connectSidecarServiceUpdated(nil, new(structs.ConsulSidecarService)).modified)
|
||||
must.True(t, connectSidecarServiceUpdated(nil, new(structs.ConsulSidecarService)).modified)
|
||||
})
|
||||
|
||||
t.Run("ports differ", func(t *testing.T) {
|
||||
a := &structs.ConsulSidecarService{Port: "1111"}
|
||||
b := &structs.ConsulSidecarService{Port: "2222"}
|
||||
require.True(t, connectSidecarServiceUpdated(a, b).modified)
|
||||
must.True(t, connectSidecarServiceUpdated(a, b).modified)
|
||||
})
|
||||
|
||||
t.Run("same", func(t *testing.T) {
|
||||
a := &structs.ConsulSidecarService{Port: "1111"}
|
||||
b := &structs.ConsulSidecarService{Port: "1111"}
|
||||
require.False(t, connectSidecarServiceUpdated(a, b).modified)
|
||||
must.False(t, connectSidecarServiceUpdated(a, b).modified)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1144,17 +1142,17 @@ func TestTaskGroupConstraints(t *testing.T) {
|
||||
expDrivers := map[string]struct{}{"exec": {}, "docker": {}}
|
||||
|
||||
actConstrains := taskGroupConstraints(tg)
|
||||
require.True(t, reflect.DeepEqual(actConstrains.constraints, expConstr),
|
||||
"taskGroupConstraints(%v) returned %v; want %v", tg, actConstrains.constraints, expConstr)
|
||||
require.True(t, reflect.DeepEqual(actConstrains.drivers, expDrivers),
|
||||
"taskGroupConstraints(%v) returned %v; want %v", tg, actConstrains.drivers, expDrivers)
|
||||
must.Eq(t, actConstrains.constraints, expConstr, must.Sprintf(
|
||||
"taskGroupConstraints(%v) returned %v; want %v", tg, actConstrains.constraints, expConstr))
|
||||
must.Eq(t, actConstrains.drivers, expDrivers, must.Sprintf(
|
||||
"taskGroupConstraints(%v) returned %v; want %v", tg, actConstrains.drivers, expDrivers))
|
||||
}
|
||||
|
||||
func TestProgressMade(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
noopPlan := &structs.PlanResult{}
|
||||
require.False(t, progressMade(nil) || progressMade(noopPlan), "no progress plan marked as making progress")
|
||||
must.False(t, progressMade(nil) || progressMade(noopPlan), must.Sprint("no progress plan marked as making progress"))
|
||||
|
||||
m := map[string][]*structs.Allocation{
|
||||
"foo": {mock.Alloc()},
|
||||
@@ -1172,7 +1170,7 @@ func TestProgressMade(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
require.True(t, progressMade(both) && progressMade(update) && progressMade(alloc) &&
|
||||
must.True(t, progressMade(both) && progressMade(update) && progressMade(alloc) &&
|
||||
progressMade(deployment) && progressMade(deploymentUpdates))
|
||||
}
|
||||
|
||||
@@ -1231,7 +1229,7 @@ func TestDesiredUpdates(t *testing.T) {
|
||||
}
|
||||
|
||||
desired := desiredUpdates(diff, inplace, destructive)
|
||||
require.True(t, reflect.DeepEqual(desired, expected), "desiredUpdates() returned %#v; want %#v", desired, expected)
|
||||
must.Eq(t, desired, expected, must.Sprintf("desiredUpdates() returned %#v; want %#v", desired, expected))
|
||||
}
|
||||
|
||||
func TestUtil_AdjustQueuedAllocations(t *testing.T) {
|
||||
@@ -1268,7 +1266,7 @@ func TestUtil_AdjustQueuedAllocations(t *testing.T) {
|
||||
queuedAllocs := map[string]int{"web": 2}
|
||||
adjustQueuedAllocations(logger, &planResult, queuedAllocs)
|
||||
|
||||
require.Equal(t, 1, queuedAllocs["web"])
|
||||
must.Eq(t, 1, queuedAllocs["web"])
|
||||
}
|
||||
|
||||
func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) {
|
||||
@@ -1308,7 +1306,7 @@ func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) {
|
||||
allocsLost = append(allocsLost, alloc.ID)
|
||||
}
|
||||
expected := []string{alloc1.ID, alloc2.ID}
|
||||
require.True(t, reflect.DeepEqual(allocsLost, expected), "actual: %v, expected: %v", allocsLost, expected)
|
||||
must.Eq(t, allocsLost, expected, must.Sprintf("actual: %v, expected: %v", allocsLost, expected))
|
||||
|
||||
// Update the node status to ready and try again
|
||||
plan = structs.Plan{
|
||||
@@ -1322,7 +1320,7 @@ func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) {
|
||||
allocsLost = append(allocsLost, alloc.ID)
|
||||
}
|
||||
expected = []string{}
|
||||
require.True(t, reflect.DeepEqual(allocsLost, expected), "actual: %v, expected: %v", allocsLost, expected)
|
||||
must.Eq(t, allocsLost, expected, must.Sprintf("actual: %v, expected: %v", allocsLost, expected))
|
||||
}
|
||||
|