Merge pull request #6792 from hashicorp/b-propose-panic

scheduler: fix panic when preempting and evicting allocs
This commit is contained in:
Michael Schurter
2019-12-03 10:40:19 -08:00
parent 44885594aa
commit 263f89eee1
8 changed files with 500 additions and 376 deletions

View File

@@ -26,6 +26,7 @@ BUG FIXES:
* core: Ignore `server` config values if `server` is disabled [[GH-6047](https://github.com/hashicorp/nomad/issues/6047)]
* core: Added `semver` constraint for strict Semver 2.0 version comparisons [[GH-6699](https://github.com/hashicorp/nomad/issues/6699)]
* core: Fixed server panic caused by a plan evicting and preempting allocs on a node [[GH-6792](https://github.com/hashicorp/nomad/issues/6792)]
* api: Return a 404 if endpoint not found instead of redirecting to /ui/ [[GH-6658](https://github.com/hashicorp/nomad/issues/6658)]
* api: Decompress web socket response body if gzipped on error responses [[GH-6650](https://github.com/hashicorp/nomad/issues/6650)]
* api: Fixed a bug where some FS/Allocation API endpoints didn't return error messages [[GH-6427](https://github.com/hashicorp/nomad/issues/6427)]

View File

@@ -25,9 +25,9 @@ type Context interface {
// Reset is invoked after making a placement
Reset()
// ProposedAllocs returns the proposed allocations for a node
// which is the existing allocations, removing evictions, and
// adding any planned placements.
// ProposedAllocs returns the proposed allocations for a node which are
// the existing allocations, removing evictions, and adding any planned
// placements.
ProposedAllocs(nodeID string) ([]*structs.Allocation, error)
// RegexpCache is a cache of regular expressions
@@ -120,22 +120,21 @@ func (e *EvalContext) Reset() {
func (e *EvalContext) ProposedAllocs(nodeID string) ([]*structs.Allocation, error) {
// Get the existing allocations that are non-terminal
ws := memdb.NewWatchSet()
existingAlloc, err := e.state.AllocsByNodeTerminal(ws, nodeID, false)
proposed, err := e.state.AllocsByNodeTerminal(ws, nodeID, false)
if err != nil {
return nil, err
}
// Determine the proposed allocation by first removing allocations
// that are planned evictions and adding the new allocations.
proposed := existingAlloc
if update := e.plan.NodeUpdate[nodeID]; len(update) > 0 {
proposed = structs.RemoveAllocs(existingAlloc, update)
proposed = structs.RemoveAllocs(proposed, update)
}
// Remove any allocs that are being preempted
nodePreemptedAllocs := e.plan.NodePreemptions[nodeID]
if len(nodePreemptedAllocs) > 0 {
proposed = structs.RemoveAllocs(existingAlloc, nodePreemptedAllocs)
proposed = structs.RemoveAllocs(proposed, nodePreemptedAllocs)
}
// We create an index of the existing allocations so that if an inplace
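The bug is easiest to see in isolation: the old code computed the eviction pass into `proposed`, but then ran the preemption pass against the original `existingAlloc` slice, discarding the eviction pass whenever a plan contained both. Below is a minimal, self-contained sketch of the pattern — not the Nomad code itself; `removeByID` merely stands in for `structs.RemoveAllocs`:

```go
package main

import "fmt"

// removeByID returns allocs minus any ID present in remove; it stands
// in for structs.RemoveAllocs in this sketch.
func removeByID(allocs, remove []string) []string {
	drop := make(map[string]bool, len(remove))
	for _, id := range remove {
		drop[id] = true
	}
	out := make([]string, 0, len(allocs))
	for _, id := range allocs {
		if !drop[id] {
			out = append(out, id)
		}
	}
	return out
}

func main() {
	existing := []string{"evicted", "preempted", "kept"}
	evictions := []string{"evicted"}
	preemptions := []string{"preempted"}

	// Buggy pattern: the second pass starts from `existing` again, so
	// the eviction is silently undone.
	proposed := removeByID(existing, evictions)
	proposed = removeByID(existing, preemptions)
	fmt.Println(proposed) // [evicted kept]

	// Fixed pattern: each pass chains off the running result.
	proposed = removeByID(existing, evictions)
	proposed = removeByID(proposed, preemptions)
	fmt.Println(proposed) // [kept]
}
```

Returning the stale, already-evicted alloc in the proposed set is what ultimately surfaced as the server panic tracked in issue #6787.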

View File

@@ -106,9 +106,9 @@ func TestEvalContext_ProposedAlloc(t *testing.T) {
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
noErr(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
noErr(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))
require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
require.NoError(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))
// Add a planned eviction to alloc1
plan := ctx.Plan()
@@ -149,6 +149,116 @@ func TestEvalContext_ProposedAlloc(t *testing.T) {
}
}
// TestEvalContext_ProposedAlloc_EvictPreempt asserts both Evicted and
// Preempted allocs are removed from the allocs proposed for a node.
//
// See https://github.com/hashicorp/nomad/issues/6787
//
func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) {
t.Parallel()
state, ctx := testContext(t)
nodes := []*RankedNode{
{
Node: &structs.Node{
ID: uuid.Generate(),
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
CpuShares: 1024 * 3,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 1024 * 3,
},
},
},
},
}
// Add existing allocations
j1, j2, j3 := mock.Job(), mock.Job(), mock.Job()
allocEvict := &structs.Allocation{
ID: uuid.Generate(),
Namespace: structs.DefaultNamespace,
EvalID: uuid.Generate(),
NodeID: nodes[0].Node.ID,
JobID: j1.ID,
Job: j1,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 1024,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 1024,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
allocPreempt := &structs.Allocation{
ID: uuid.Generate(),
Namespace: structs.DefaultNamespace,
EvalID: uuid.Generate(),
NodeID: nodes[0].Node.ID,
JobID: j2.ID,
Job: j2,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 1024,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 1024,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
allocPropose := &structs.Allocation{
ID: uuid.Generate(),
Namespace: structs.DefaultNamespace,
EvalID: uuid.Generate(),
NodeID: nodes[0].Node.ID,
JobID: j3.ID,
Job: j3,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 1024,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 1024,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(allocEvict.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPreempt.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPropose.JobID)))
require.NoError(t, state.UpsertAllocs(1000, []*structs.Allocation{allocEvict, allocPreempt, allocPropose}))
// Plan to evict one alloc and preempt another
plan := ctx.Plan()
plan.NodeUpdate[nodes[0].Node.ID] = []*structs.Allocation{allocEvict}
plan.NodePreemptions[nodes[0].Node.ID] = []*structs.Allocation{allocPreempt}
proposed, err := ctx.ProposedAllocs(nodes[0].Node.ID)
require.NoError(t, err)
require.Len(t, proposed, 1)
}
func TestEvalEligibility_JobStatus(t *testing.T) {
e := NewEvalEligibility()
cc := "v1:100"

File diff suppressed because it is too large

View File

@@ -551,9 +551,9 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) {
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
noErr(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
noErr(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))
require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
require.NoError(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))
taskGroup := &structs.TaskGroup{
EphemeralDisk: &structs.EphemeralDisk{},
@@ -666,9 +666,9 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) {
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
noErr(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
noErr(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))
require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
require.NoError(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))
// Add a planned eviction to alloc1
plan := ctx.Plan()

View File

@@ -21,12 +21,12 @@ func TestSystemSched_JobRegister(t *testing.T) {
// Create some nodes
for i := 0; i < 10; i++ {
node := mock.Node()
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
}
// Create a job
job := mock.SystemJob()
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
@@ -37,7 +37,7 @@ func TestSystemSched_JobRegister(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
@@ -68,7 +68,7 @@ func TestSystemSched_JobRegister(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
require.NoError(t, err)
// Ensure all allocations placed
if len(out) != 10 {
@@ -95,13 +95,13 @@ func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) {
// Create some nodes
for i := 0; i < 10; i++ {
node := mock.Node()
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
}
// Create a job
job := mock.SystemJob()
job.TaskGroups[0].EphemeralDisk.Sticky = true
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
@@ -112,7 +112,7 @@ func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
if err := h.Process(NewSystemScheduler, eval); err != nil {
@@ -132,7 +132,7 @@ func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) {
// Get an allocation and mark it as failed
alloc := planned[4].Copy()
alloc.ClientStatus = structs.AllocClientStatusFailed
noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{alloc}))
require.NoError(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{alloc}))
// Create a mock evaluation to handle the update
eval = &structs.Evaluation{
@@ -143,7 +143,7 @@ func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
h1 := NewHarnessWithState(t, h.State)
if err := h1.Process(NewSystemScheduler, eval); err != nil {
t.Fatalf("err: %v", err)
@@ -170,18 +170,18 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) {
// Create a node
node := mock.Node()
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
// Create a job
job := mock.SystemJob()
job.TaskGroups[0].EphemeralDisk.SizeMB = 60 * 1024
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
// Create another job with a lot of disk resource ask so that it doesn't fit
// the node
job1 := mock.SystemJob()
job1.TaskGroups[0].EphemeralDisk.SizeMB = 60 * 1024
noErr(t, h.State.UpsertJob(h.NextIndex(), job1))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job1))
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
@@ -192,7 +192,7 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
if err := h.Process(NewSystemScheduler, eval); err != nil {
@@ -202,7 +202,7 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
require.NoError(t, err)
// Ensure all allocations placed
if len(out) != 1 {
@@ -220,7 +220,7 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) {
JobID: job1.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
// Process the evaluation
if err := h1.Process(NewSystemScheduler, eval1); err != nil {
@@ -228,7 +228,7 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) {
}
out, err = h1.State.AllocsByJob(ws, job.Namespace, job1.ID, false)
noErr(t, err)
require.NoError(t, err)
if len(out) != 0 {
t.Fatalf("bad: %#v", out)
}
@@ -239,7 +239,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) {
// Create a node
node := mock.Node()
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
// Enable Preemption
h.State.SchedulerSetConfig(h.NextIndex(), &structs.SchedulerConfiguration{
@@ -252,7 +252,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) {
svcJob := mock.Job()
svcJob.TaskGroups[0].Count = 1
svcJob.TaskGroups[0].Tasks[0].Resources.CPU = 3600
noErr(t, h.State.UpsertJob(h.NextIndex(), svcJob))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), svcJob))
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
@@ -263,7 +263,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) {
JobID: svcJob.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewServiceScheduler, eval)
if err != nil {
@@ -272,7 +272,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) {
// Create a system job
job := mock.SystemJob()
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
// Create a mock evaluation to register the job
eval1 := &structs.Evaluation{
@@ -283,7 +283,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
// Process the evaluation
if err := h.Process(NewSystemScheduler, eval1); err != nil {
t.Fatalf("err: %v", err)
@@ -324,7 +324,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) {
node.NodeClass = "bar"
}
node.ComputeClass()
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
}
// Create a job constraining on node class
@@ -335,7 +335,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) {
Operand: "==",
}
job.Constraints = append(job.Constraints, fooConstraint)
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
@@ -347,7 +347,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) {
AnnotatePlan: true,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
@@ -373,7 +373,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
require.NoError(t, err)
// Ensure all allocations placed
if len(out) != 9 {
@@ -416,12 +416,12 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) {
for i := 0; i < 10; i++ {
node := mock.Node()
nodes = append(nodes, node)
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
}
// Generate a fake job with allocations
job := mock.SystemJob()
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
var allocs []*structs.Allocation
for _, node := range nodes {
@@ -432,11 +432,11 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) {
alloc.Name = "my-job.web[0]"
allocs = append(allocs, alloc)
}
noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
// Add a new node.
node := mock.Node()
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
// Create a mock evaluation to deal with the node update
eval := &structs.Evaluation{
@@ -447,7 +447,7 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
if err != nil {
@@ -487,7 +487,7 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
require.NoError(t, err)
// Ensure all allocations placed
out, _ = structs.FilterTerminalAllocs(out)
@@ -504,7 +504,7 @@ func TestSystemSched_JobRegister_AllocFail(t *testing.T) {
// Create NO nodes
// Create a job
job := mock.SystemJob()
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
@@ -515,7 +515,7 @@ func TestSystemSched_JobRegister_AllocFail(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
if err != nil {
@@ -538,12 +538,12 @@ func TestSystemSched_JobModify(t *testing.T) {
for i := 0; i < 10; i++ {
node := mock.Node()
nodes = append(nodes, node)
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
}
// Generate a fake job with allocations
job := mock.SystemJob()
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
var allocs []*structs.Allocation
for _, node := range nodes {
@@ -554,7 +554,7 @@ func TestSystemSched_JobModify(t *testing.T) {
alloc.Name = "my-job.web[0]"
allocs = append(allocs, alloc)
}
noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
// Add a few terminal status allocations, these should be ignored
var terminal []*structs.Allocation
@@ -567,7 +567,7 @@ func TestSystemSched_JobModify(t *testing.T) {
alloc.DesiredStatus = structs.AllocDesiredStatusStop
terminal = append(terminal, alloc)
}
noErr(t, h.State.UpsertAllocs(h.NextIndex(), terminal))
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), terminal))
// Update the job
job2 := mock.SystemJob()
@@ -575,7 +575,7 @@ func TestSystemSched_JobModify(t *testing.T) {
// Update the task, such that it cannot be done in-place
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
// Create a mock evaluation to deal with the job update
eval := &structs.Evaluation{
@@ -586,7 +586,7 @@ func TestSystemSched_JobModify(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
@@ -621,7 +621,7 @@ func TestSystemSched_JobModify(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
require.NoError(t, err)
// Ensure all allocations placed
out, _ = structs.FilterTerminalAllocs(out)
@@ -640,12 +640,12 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) {
for i := 0; i < 10; i++ {
node := mock.Node()
nodes = append(nodes, node)
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
}
// Generate a fake job with allocations
job := mock.SystemJob()
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
var allocs []*structs.Allocation
for _, node := range nodes {
@@ -656,7 +656,7 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) {
alloc.Name = "my-job.web[0]"
allocs = append(allocs, alloc)
}
noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
// Update the job
job2 := mock.SystemJob()
@@ -668,7 +668,7 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) {
// Update the task, such that it cannot be done in-place
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
// Create a mock evaluation to deal with the job update
eval := &structs.Evaluation{
@@ -679,7 +679,7 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
if err != nil {
@@ -743,12 +743,12 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) {
for i := 0; i < 10; i++ {
node := mock.Node()
nodes = append(nodes, node)
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
}
// Generate a fake job with allocations
job := mock.SystemJob()
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
var allocs []*structs.Allocation
for _, node := range nodes {
@@ -759,12 +759,12 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) {
alloc.Name = "my-job.web[0]"
allocs = append(allocs, alloc)
}
noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
// Update the job
job2 := mock.SystemJob()
job2.ID = job.ID
noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
// Create a mock evaluation to deal with the job update
eval := &structs.Evaluation{
@@ -775,7 +775,7 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
@@ -815,7 +815,7 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
require.NoError(t, err)
// Ensure all allocations placed
if len(out) != 10 {
@@ -842,7 +842,7 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) {
for i := 0; i < 10; i++ {
node := mock.Node()
nodes = append(nodes, node)
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
}
// Generate a fake job with allocations
@@ -858,9 +858,9 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) {
allocs = append(allocs, alloc)
}
for _, alloc := range allocs {
noErr(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)))
require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)))
}
noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
// Create a mock evaluation to deregister the job
eval := &structs.Evaluation{
@@ -871,7 +871,7 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
@@ -895,7 +895,7 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
require.NoError(t, err)
// Ensure no remaining allocations
out, _ = structs.FilterTerminalAllocs(out)
@@ -914,13 +914,13 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) {
for i := 0; i < 10; i++ {
node := mock.Node()
nodes = append(nodes, node)
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
}
// Generate a fake job with allocations
job := mock.SystemJob()
job.Stop = true
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
var allocs []*structs.Allocation
for _, node := range nodes {
@@ -932,9 +932,9 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) {
allocs = append(allocs, alloc)
}
for _, alloc := range allocs {
noErr(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)))
require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)))
}
noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
// Create a mock evaluation to deregister the job
eval := &structs.Evaluation{
@@ -945,7 +945,7 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
@@ -969,7 +969,7 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
require.NoError(t, err)
// Ensure no remaining allocations
out, _ = structs.FilterTerminalAllocs(out)
@@ -986,11 +986,11 @@ func TestSystemSched_NodeDown(t *testing.T) {
// Register a down node
node := mock.Node()
node.Status = structs.NodeStatusDown
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
// Generate a fake job allocated on that node.
job := mock.SystemJob()
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
alloc := mock.Alloc()
alloc.Job = job
@@ -998,7 +998,7 @@ func TestSystemSched_NodeDown(t *testing.T) {
alloc.NodeID = node.ID
alloc.Name = "my-job.web[0]"
alloc.DesiredTransition.Migrate = helper.BoolToPtr(true)
noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
// Create a mock evaluation to deal with the node update
eval := &structs.Evaluation{
@@ -1010,7 +1010,7 @@ func TestSystemSched_NodeDown(t *testing.T) {
NodeID: node.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
@@ -1054,18 +1054,18 @@ func TestSystemSched_NodeDrain_Down(t *testing.T) {
node := mock.Node()
node.Drain = true
node.Status = structs.NodeStatusDown
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
// Generate a fake job allocated on that node.
job := mock.SystemJob()
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
alloc := mock.Alloc()
alloc.Job = job
alloc.JobID = job.ID
alloc.NodeID = node.ID
alloc.Name = "my-job.web[0]"
noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
// Create a mock evaluation to deal with the node update
eval := &structs.Evaluation{
@@ -1077,7 +1077,7 @@ func TestSystemSched_NodeDrain_Down(t *testing.T) {
NodeID: node.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewServiceScheduler, eval)
@@ -1115,11 +1115,11 @@ func TestSystemSched_NodeDrain(t *testing.T) {
// Register a draining node
node := mock.Node()
node.Drain = true
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
// Generate a fake job allocated on that node.
job := mock.SystemJob()
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
alloc := mock.Alloc()
alloc.Job = job
@@ -1127,7 +1127,7 @@ func TestSystemSched_NodeDrain(t *testing.T) {
alloc.NodeID = node.ID
alloc.Name = "my-job.web[0]"
alloc.DesiredTransition.Migrate = helper.BoolToPtr(true)
noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
@@ -1139,7 +1139,7 @@ func TestSystemSched_NodeDrain(t *testing.T) {
NodeID: node.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
@@ -1181,18 +1181,18 @@ func TestSystemSched_NodeUpdate(t *testing.T) {
// Register a node
node := mock.Node()
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
// Generate a fake job allocated on that node.
job := mock.SystemJob()
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
alloc := mock.Alloc()
alloc.Job = job
alloc.JobID = job.ID
alloc.NodeID = node.ID
alloc.Name = "my-job.web[0]"
noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
// Create a mock evaluation to deal with the node update
eval := &structs.Evaluation{
@@ -1204,7 +1204,7 @@ func TestSystemSched_NodeUpdate(t *testing.T) {
NodeID: node.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
@@ -1227,12 +1227,12 @@ func TestSystemSched_RetryLimit(t *testing.T) {
// Create some nodes
for i := 0; i < 10; i++ {
node := mock.Node()
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
}
// Create a job
job := mock.SystemJob()
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
@@ -1243,7 +1243,7 @@ func TestSystemSched_RetryLimit(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
@@ -1259,7 +1259,7 @@ func TestSystemSched_RetryLimit(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
require.NoError(t, err)
// Ensure no allocations placed
if len(out) != 0 {
@@ -1279,11 +1279,11 @@ func TestSystemSched_Queued_With_Constraints(t *testing.T) {
// Register a node
node := mock.Node()
node.Attributes["kernel.name"] = "darwin"
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
// Generate a system job which can't be placed on the node
job := mock.SystemJob()
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
// Create a mock evaluation to deal with the node update
eval := &structs.Evaluation{
@@ -1295,7 +1295,7 @@ func TestSystemSched_Queued_With_Constraints(t *testing.T) {
NodeID: node.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
@@ -1388,12 +1388,12 @@ func TestSystemSched_ChainedAlloc(t *testing.T) {
// Create some nodes
for i := 0; i < 10; i++ {
node := mock.Node()
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
}
// Create a job
job := mock.SystemJob()
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
@@ -1404,7 +1404,7 @@ func TestSystemSched_ChainedAlloc(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
if err := h.Process(NewSystemScheduler, eval); err != nil {
t.Fatalf("err: %v", err)
@@ -1424,12 +1424,12 @@ func TestSystemSched_ChainedAlloc(t *testing.T) {
job1.ID = job.ID
job1.TaskGroups[0].Tasks[0].Env = make(map[string]string)
job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar"
noErr(t, h1.State.UpsertJob(h1.NextIndex(), job1))
require.NoError(t, h1.State.UpsertJob(h1.NextIndex(), job1))
// Insert two more nodes
for i := 0; i < 2; i++ {
node := mock.Node()
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
}
// Create a mock evaluation to update the job
@@ -1441,7 +1441,7 @@ func TestSystemSched_ChainedAlloc(t *testing.T) {
JobID: job1.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
// Process the evaluation
if err := h1.Process(NewSystemScheduler, eval1); err != nil {
t.Fatalf("err: %v", err)
@@ -1484,12 +1484,12 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) {
node.NodeClass = "green"
node.Drain = true
node.ComputeClass()
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
node2 := mock.Node()
node2.NodeClass = "blue"
node2.ComputeClass()
noErr(t, h.State.UpsertNode(h.NextIndex(), node2))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node2))
// Create a Job with two task groups, each constrained on node class
job := mock.SystemJob()
@@ -1505,7 +1505,7 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) {
tg2.Name = "web2"
tg2.Constraints[0].RTarget = "blue"
job.TaskGroups = append(job.TaskGroups, tg2)
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
// Create an allocation on each node
alloc := mock.Alloc()
@@ -1522,7 +1522,7 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) {
alloc2.NodeID = node2.ID
alloc2.Name = "my-job.web2[0]"
alloc2.TaskGroup = "web2"
noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc, alloc2}))
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc, alloc2}))
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
@@ -1534,7 +1534,7 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) {
NodeID: node.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
@@ -1574,12 +1574,12 @@ func TestSystemSched_QueuedAllocsMultTG(t *testing.T) {
node := mock.Node()
node.NodeClass = "green"
node.ComputeClass()
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
node2 := mock.Node()
node2.NodeClass = "blue"
node2.ComputeClass()
noErr(t, h.State.UpsertNode(h.NextIndex(), node2))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node2))
// Create a Job with two task groups, each constrained on node class
job := mock.SystemJob()
@@ -1595,7 +1595,7 @@ func TestSystemSched_QueuedAllocsMultTG(t *testing.T) {
tg2.Name = "web2"
tg2.Constraints[0].RTarget = "blue"
job.TaskGroups = append(job.TaskGroups, tg2)
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
// Create a mock evaluation to deal with the node update
eval := &structs.Evaluation{
@@ -1607,7 +1607,7 @@ func TestSystemSched_QueuedAllocsMultTG(t *testing.T) {
NodeID: node.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
@@ -1666,7 +1666,7 @@ func TestSystemSched_Preemption(t *testing.T) {
},
},
}
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
nodes = append(nodes, node)
}
@@ -1728,7 +1728,7 @@ func TestSystemSched_Preemption(t *testing.T) {
},
}
noErr(t, h.State.UpsertJob(h.NextIndex(), job1))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job1))
job2 := mock.BatchJob()
job2.Type = structs.JobTypeBatch
@@ -1771,7 +1771,7 @@ func TestSystemSched_Preemption(t *testing.T) {
DiskMB: 5 * 1024,
},
}
noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
job3 := mock.Job()
job3.Type = structs.JobTypeBatch
@@ -1816,7 +1816,7 @@ func TestSystemSched_Preemption(t *testing.T) {
DiskMB: 5 * 1024,
},
}
noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc1, alloc2, alloc3}))
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc1, alloc2, alloc3}))
// Create a high priority job and allocs for it
// These allocs should not be preempted
@@ -1863,8 +1863,8 @@ func TestSystemSched_Preemption(t *testing.T) {
DiskMB: 2 * 1024,
},
}
noErr(t, h.State.UpsertJob(h.NextIndex(), job4))
noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc4}))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job4))
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc4}))
// Create a system job such that it would need to preempt both allocs to succeed
job := mock.SystemJob()
@@ -1878,7 +1878,7 @@ func TestSystemSched_Preemption(t *testing.T) {
},
},
}
noErr(t, h.State.UpsertJob(h.NextIndex(), job))
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
@@ -1889,7 +1889,7 @@ func TestSystemSched_Preemption(t *testing.T) {
JobID: job.ID,
Status: structs.EvalStatusPending,
}
noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
// Process the evaluation
err := h.Process(NewSystemScheduler, eval)
@@ -1921,7 +1921,7 @@ func TestSystemSched_Preemption(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
require.NoError(err)
// Ensure all allocations placed
require.Equal(2, len(out))
@@ -1945,7 +1945,7 @@ func TestSystemSched_Preemption(t *testing.T) {
for _, jobId := range expectedPreemptedJobIDs {
out, err = h.State.AllocsByJob(ws, structs.DefaultNamespace, jobId, false)
noErr(t, err)
require.NoError(err)
for _, alloc := range out {
require.Equal(structs.AllocDesiredStatusEvict, alloc.DesiredStatus)
require.Equal(fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocId), alloc.DesiredDescription)

View File

@@ -15,13 +15,6 @@ import (
"github.com/hashicorp/nomad/nomad/structs"
)
// noErr is used to assert there are no errors
func noErr(t *testing.T, err error) {
if err != nil {
t.Fatalf("err: %v", err)
}
}
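The helper's replacement is the stock testify call, which fails and halts the test immediately just as the old `t.Fatalf` did, with the added benefit of file/line context in the failure message. A hypothetical example of the idiom (not part of this diff; `doWork` is a made-up stand-in):

```go
package scheduler

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// doWork is a hypothetical stand-in for any fallible call under test.
func doWork() error { return nil }

func TestRequireNoErrorIdiom(t *testing.T) {
	err := doWork()
	// Equivalent to the removed noErr(t, err): fails the test via
	// t.FailNow if err is non-nil.
	require.NoError(t, err)
}
```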
func TestMaterializeTaskGroups(t *testing.T) {
job := mock.Job()
index := materializeTaskGroups(job)

View File

@@ -15,6 +15,24 @@ details provided for their upgrades as a result of new features or changed
behavior. This page is used to document those details separately from the
standard upgrade flow.
## Nomad 0.10.2
### Preemption Panic Fixed
Nomad 0.10.2 fixes a [server-crashing bug][gh-6787] present in scheduler
preemption since 0.9.0. Users unable to immediately upgrade to Nomad 0.10.2 can
[disable preemption][preemption-api] to avoid the panic.
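As a sketch of that workaround using the Go API client — assuming the `Operator().SchedulerSetConfiguration` method and `PreemptionConfig` field shipped in contemporary `api` package releases; consult the linked API docs for the authoritative payload and signature:

```go
package main

import (
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Turn off system-scheduler preemption until the cluster can be
	// upgraded to 0.10.2.
	conf := &api.SchedulerConfiguration{
		PreemptionConfig: api.PreemptionConfig{
			SystemSchedulerEnabled: false,
		},
	}
	if _, _, err := client.Operator().SchedulerSetConfiguration(conf, nil); err != nil {
		log.Fatal(err)
	}
}
```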
### Dangling Docker Container Cleanup
Nomad 0.10.2 addresses an issue occurring on heavily loaded clients, where
containers were started without being properly managed by Nomad. Nomad 0.10.2
introduces a reaper that detects and kills such containers.
Operators may opt to run the reaper in dry-run mode, or disable it entirely,
through client configuration. For more information, see
[Docker Dangling Containers][dangling-containers].
## Nomad 0.10.0
### Deployments
@@ -364,12 +382,15 @@ deleted and then Nomad 0.3.0 can be launched.
[drain-api]: /api/nodes.html#drain-node
[drain-cli]: /docs/commands/node/drain.html
[dangling-containers]: /docs/drivers/docker.html#dangling-containers
[gh-6787]: https://github.com/hashicorp/nomad/issues/6787
[hcl2]: https://github.com/hashicorp/hcl2
[lxc]: /docs/drivers/external/lxc.html
[migrate]: /docs/job-specification/migrate.html
[plugins]: /docs/drivers/external/index.html
[plugin-stanza]: /docs/configuration/plugin.html
[preemption]: /docs/internals/scheduling/preemption.html
[preemption-api]: /api/operator.html#update-scheduler-configuration
[task-config]: /docs/job-specification/task.html#config
[validate]: /docs/commands/job/validate.html
[update]: /docs/job-specification/update.html