Mirror of https://github.com/kemko/nomad.git
Fixed the logic of calculating queued allocation in sys sched (#1724)
committed by GitHub
parent 0150970620
commit 58e5282378
@@ -259,10 +259,6 @@ func (s *SystemScheduler) computePlacements(place []allocTuple) error {
 	}
 
 	nodes := make([]*structs.Node, 1)
-
-	// nodesFiltered holds the number of nodes filtered by the stack due to
-	// constrain mismatches while we are trying to place allocations on node
-	var nodesFiltered int
 	for _, missing := range place {
 		node, ok := nodeByID[missing.Alloc.NodeID]
 		if !ok {
@@ -280,7 +276,7 @@ func (s *SystemScheduler) computePlacements(place []allocTuple) error {
 			// If nodes were filtered because of constain mismatches and we
 			// couldn't create an allocation then decrementing queued for that
 			// task group
-			if s.ctx.metrics.NodesFiltered > nodesFiltered {
+			if s.ctx.metrics.NodesFiltered > 0 {
 				s.queuedAllocs[missing.TaskGroup.Name] -= 1
 
 				// If we are annotating the plan, then decrement the desired
@@ -292,9 +288,6 @@ func (s *SystemScheduler) computePlacements(place []allocTuple) error {
 				}
 			}
-
-			// Record the current number of nodes filtered in this iteration
-			nodesFiltered = s.ctx.metrics.NodesFiltered
 
 			// Check if this task group has already failed
 			if metric, ok := s.failedTGAllocs[missing.TaskGroup.Name]; ok {
 				metric.CoalescedFailures += 1
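Taken together, the three hunks above drop the per-iteration nodesFiltered snapshot and simply decrement the queued count for a task group whenever a placement attempt saw constraint-filtered nodes and produced no allocation. The following is a minimal, hypothetical Go sketch of that accounting rule only; placement, Filtered, and adjustQueued are illustrative names, not Nomad's actual types or functions.

package main

import "fmt"

// placement stands in for one task group that still needs an allocation on a
// particular node (a rough analogue of the scheduler's allocTuple).
type placement struct {
	TaskGroup string
	// Filtered is true when constraint filtering rejected the candidate node,
	// so no allocation could be created for this placement.
	Filtered bool
}

// adjustQueued applies the accounting rule from the commit: a placement that
// produced no allocation because its node was filtered out by constraints is
// subtracted from the queued count of its task group, so the evaluation does
// not keep reporting it as waiting.
func adjustQueued(queued map[string]int, placements []placement) {
	for _, p := range placements {
		if p.Filtered {
			queued[p.TaskGroup]--
		}
	}
}

func main() {
	// One queued placement for task group "web2", attempted on a node whose
	// class does not satisfy the group's constraint: the node is filtered,
	// no allocation is created, and the queued count drops back to zero.
	queued := map[string]int{"web2": 1}
	adjustQueued(queued, []placement{{TaskGroup: "web2", Filtered: true}})
	fmt.Println(queued) // map[web2:0]
}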
@@ -1315,3 +1315,61 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) {
 
 	h.AssertEvalStatus(t, structs.EvalStatusComplete)
 }
+
+func TestSystemSched_QueuedAllocsMultTG(t *testing.T) {
+	h := NewHarness(t)
+
+	// Register two nodes with two different classes
+	node := mock.Node()
+	node.NodeClass = "green"
+	node.ComputeClass()
+	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+
+	node2 := mock.Node()
+	node2.NodeClass = "blue"
+	node2.ComputeClass()
+	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))
+
+	// Create a Job with two task groups, each constrianed on node class
+	job := mock.SystemJob()
+	tg1 := job.TaskGroups[0]
+	tg1.Constraints = append(tg1.Constraints,
+		&structs.Constraint{
+			LTarget: "${node.class}",
+			RTarget: "green",
+			Operand: "==",
+		})
+
+	tg2 := tg1.Copy()
+	tg2.Name = "web2"
+	tg2.Constraints[0].RTarget = "blue"
+	job.TaskGroups = append(job.TaskGroups, tg2)
+	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+
+	// Create a mock evaluation to deal with drain
+	eval := &structs.Evaluation{
+		ID:          structs.GenerateUUID(),
+		Priority:    50,
+		TriggeredBy: structs.EvalTriggerNodeUpdate,
+		JobID:       job.ID,
+		NodeID:      node.ID,
+	}
+
+	// Process the evaluation
+	err := h.Process(NewSystemScheduler, eval)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Ensure a single plan
+	if len(h.Plans) != 1 {
+		t.Fatalf("bad: %#v", h.Plans)
+	}
+
+	qa := h.Evals[0].QueuedAllocations
+	if qa["web"] != 0 || qa["web2"] != 0 {
+		t.Fatalf("bad queued allocations %#v", qa)
+	}
+
+	h.AssertEvalStatus(t, structs.EvalStatusComplete)
+}
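The new TestSystemSched_QueuedAllocsMultTG exercises the multi-task-group case: two task groups, each constrained to a different node class, are scheduled across a green and a blue node, so each group can only be placed on its matching node, and the evaluation is expected to report zero queued allocations for both groups. Assuming the test lives in the scheduler package alongside the other TestSystemSched_* tests, it can be run on its own with go test ./scheduler -run TestSystemSched_QueuedAllocsMultTG.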