Fix bug, add tests, and CLI output

Alex Dadgar
2016-01-04 14:23:06 -08:00
parent 251bcf106f
commit 892c7ddee3
6 changed files with 34 additions and 14 deletions

View File

@@ -62,6 +62,7 @@ type Allocation struct {
 type AllocationMetric struct {
 	NodesEvaluated     int
 	NodesFiltered      int
+	NodesAvailable     map[string]int
 	ClassFiltered      map[string]int
 	ConstraintFiltered map[string]int
 	NodesExhausted     int
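
The new NodesAvailable field maps each datacenter a job asked for to the number of ready nodes found there. As a rough sketch of the map the schedulers store here, the snippet below counts ready nodes per requested datacenter and keeps zero entries so that an empty datacenter is still visible to the CLI; Node and nodesAvailableByDC are invented stand-ins, and in this commit the real counts come from readyNodesInDCs.

package main

import "fmt"

// Node is a stand-in for structs.Node with only the fields this
// illustration needs.
type Node struct {
	Datacenter string
	Ready      bool
}

// nodesAvailableByDC builds the kind of map stored in NodesAvailable:
// ready-node counts per requested datacenter, seeded with zeros so a
// datacenter with no ready nodes still shows up.
func nodesAvailableByDC(nodes []Node, datacenters []string) map[string]int {
	byDC := make(map[string]int, len(datacenters))
	for _, dc := range datacenters {
		byDC[dc] = 0
	}
	for _, n := range nodes {
		if _, ok := byDC[n.Datacenter]; ok && n.Ready {
			byDC[n.Datacenter]++
		}
	}
	return byDC
}

func main() {
	nodes := []Node{{"dc1", true}, {"dc1", true}, {"dc2", false}}
	fmt.Println(nodesAvailableByDC(nodes, []string{"dc1", "dc2"}))
	// Prints: map[dc1:2 dc2:0]
}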

View File

@@ -280,6 +280,14 @@ func dumpAllocStatus(ui cli.Ui, alloc *api.Allocation) {
 		ui.Output(" * No nodes were eligible for evaluation")
 	}

+	// Print a helpful message if the user has asked for a DC that has no
+	// available nodes.
+	for dc, available := range alloc.Metrics.NodesAvailable {
+		if available == 0 {
+			ui.Output(fmt.Sprintf(" * No nodes are available in datacenter %q", dc))
+		}
+	}
+
 	// Print filter info
 	for class, num := range alloc.Metrics.ClassFiltered {
 		ui.Output(fmt.Sprintf(" * Class %q filtered %d nodes", class, num))
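
In isolation, the new check behaves like this minimal sketch, with fmt standing in for the cli.Ui dependency and an invented metrics map:

package main

import "fmt"

func main() {
	// Invented metrics: the job asked for dc1 and dc2, and dc2 has no
	// ready nodes.
	nodesAvailable := map[string]int{"dc1": 3, "dc2": 0}

	// The same loop the new CLI code runs, with fmt in place of cli.Ui.
	for dc, available := range nodesAvailable {
		if available == 0 {
			fmt.Printf(" * No nodes are available in datacenter %q\n", dc)
		}
	}
	// Prints: * No nodes are available in datacenter "dc2"
}

Note the message only fires for datacenters that are present in the map with a zero count, which is presumably why the schedulers record every requested datacenter rather than only the populated ones.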

View File

@@ -250,9 +250,6 @@ func (s *GenericScheduler) computePlacements(place []allocTuple) error {
 		return err
 	}

-	// Store the available nodes by datacenter
-	s.ctx.Metrics().NodesAvailable = byDC
-
 	// Update the set of placement nodes
 	s.stack.SetNodes(nodes)
@@ -282,6 +279,9 @@ func (s *GenericScheduler) computePlacements(place []allocTuple) error {
 			Metrics: s.ctx.Metrics(),
 		}

+		// Store the available nodes by datacenter
+		s.ctx.Metrics().NodesAvailable = byDC
+
 		// Set fields based on if we found an allocation option
 		if option != nil {
 			// Generate the service ids for the tasks which this allocation is going
@@ -303,5 +303,6 @@ func (s *GenericScheduler) computePlacements(place []allocTuple) error {
 			failedTG[missing.TaskGroup] = alloc
 		}
 	}
+
 	return nil
 }
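
The bug fix in this file is an ordering change: NodesAvailable was previously stored once, before the placement work, and is now stored per allocation, right where Metrics is captured. A plausible reading of these hunks is that the context's metrics are recreated for each placement attempt, so a value stored up front gets wiped. The contrived sketch below shows that failure mode; Ctx, Reset, and this AllocMetric are invented stand-ins, not the scheduler's actual types.

package main

import "fmt"

// AllocMetric is an invented stand-in for the per-allocation metrics.
type AllocMetric struct {
	NodesAvailable map[string]int
}

// Ctx mimics an evaluation context whose metrics are recreated for
// every placement attempt (assumed here; it is what would make the
// original ordering lose the value).
type Ctx struct{ metrics *AllocMetric }

func (c *Ctx) Metrics() *AllocMetric { return c.metrics }
func (c *Ctx) Reset()                { c.metrics = &AllocMetric{} }

func main() {
	byDC := map[string]int{"dc1": 2}
	ctx := &Ctx{metrics: &AllocMetric{}}

	// Buggy ordering: store the counts once, up front...
	ctx.Metrics().NodesAvailable = byDC

	// ...then a placement attempt resets the metrics and the value is gone.
	ctx.Reset()
	fmt.Println(ctx.Metrics().NodesAvailable) // map[]

	// Fixed ordering: store the counts after the reset, next to the
	// other per-allocation metrics.
	ctx.Metrics().NodesAvailable = byDC
	fmt.Println(ctx.Metrics().NodesAvailable) // map[dc1:2]
}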

View File

@@ -123,6 +123,11 @@ func TestServiceSched_JobRegister_AllocFail(t *testing.T) {
 		t.Fatalf("bad: %#v", out[0].Metrics)
 	}

+	// Check the available nodes
+	if count, ok := out[0].Metrics.NodesAvailable["dc1"]; !ok || count != 0 {
+		t.Fatalf("bad: %#v", out[0].Metrics)
+	}
+
 	h.AssertEvalStatus(t, structs.EvalStatusComplete)
 }
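
The expected count of 0 presumably reflects that this test registers the job without creating any ready nodes in dc1, which is exactly the situation the new CLI message reports.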

View File

@@ -25,12 +25,13 @@ type SystemScheduler struct {
 	state   State
 	planner Planner

-	eval  *structs.Evaluation
-	job   *structs.Job
-	plan  *structs.Plan
-	ctx   *EvalContext
-	stack *SystemStack
-	nodes []*structs.Node
+	eval      *structs.Evaluation
+	job       *structs.Job
+	plan      *structs.Plan
+	ctx       *EvalContext
+	stack     *SystemStack
+	nodes     []*structs.Node
+	nodesByDC map[string]int

 	limitReached bool
 	nextEval     *structs.Evaluation
@@ -86,14 +87,10 @@ func (s *SystemScheduler) process() (bool, error) {
 	// Get the ready nodes in the required datacenters
 	if s.job != nil {
-		var byDC map[string]int
-		s.nodes, byDC, err = readyNodesInDCs(s.state, s.job.Datacenters)
+		s.nodes, s.nodesByDC, err = readyNodesInDCs(s.state, s.job.Datacenters)
 		if err != nil {
 			return false, fmt.Errorf("failed to get ready nodes: %v", err)
 		}
-
-		// Store the available nodes by datacenter
-		s.ctx.Metrics().NodesAvailable = byDC
 	}

 	// Create a plan
@@ -250,6 +247,9 @@ func (s *SystemScheduler) computePlacements(place []allocTuple) error {
 			Metrics: s.ctx.Metrics(),
 		}

+		// Store the available nodes by datacenter
+		s.ctx.Metrics().NodesAvailable = s.nodesByDC
+
 		// Set fields based on if we found an allocation option
 		if option != nil {
 			// Generate the service ids for the tasks that this allocation is going
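
The system scheduler needs the same per-allocation assignment, but here readyNodesInDCs runs in process() while placements happen later in computePlacements(), so the counts move from a local variable to a nodesByDC field on the scheduler. A minimal sketch of that pattern, with invented stand-in types:

package main

import "fmt"

// Invented stand-ins: compute the per-DC counts once in process(),
// cache them on the scheduler, and copy them into each allocation's
// metrics at placement time.
type AllocMetric struct {
	NodesAvailable map[string]int
}

type Scheduler struct {
	nodesByDC map[string]int // written by process(), read by computePlacements()
}

func (s *Scheduler) process() {
	// Stand-in for readyNodesInDCs: one ready node in dc1.
	s.nodesByDC = map[string]int{"dc1": 1}
}

func (s *Scheduler) computePlacements() *AllocMetric {
	m := &AllocMetric{}
	// Store the available nodes by datacenter on each allocation.
	m.NodesAvailable = s.nodesByDC
	return m
}

func main() {
	s := &Scheduler{}
	s.process()
	fmt.Println(s.computePlacements().NodesAvailable) // map[dc1:1]
}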

View File

@@ -59,6 +59,11 @@ func TestSystemSched_JobRegister(t *testing.T) {
 		t.Fatalf("bad: %#v", out)
 	}

+	// Check the available nodes
+	if count, ok := out[0].Metrics.NodesAvailable["dc1"]; !ok || count != 10 {
+		t.Fatalf("bad: %#v", out[0].Metrics)
+	}
+
 	h.AssertEvalStatus(t, structs.EvalStatusComplete)
 }
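
Both new assertions have the same shape; if more datacenters were ever checked they could fold into a small helper along these lines (checkNodesAvailable is an invented name, not part of this commit):

package scheduler

import "testing"

// checkNodesAvailable compares the recorded per-DC counts against the
// expected ones, failing on a missing key or a wrong count.
func checkNodesAvailable(t *testing.T, got, want map[string]int) {
	for dc, count := range want {
		if c, ok := got[dc]; !ok || c != count {
			t.Fatalf("NodesAvailable[%q] = %d (present: %v), want %d", dc, c, ok, count)
		}
	}
}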