scheduler: basic metrics integration

This commit is contained in:
Armon Dadgar
2015-08-13 21:46:33 -07:00
parent 25a60ebd81
commit 93fa71609c
4 changed files with 55 additions and 7 deletions

View File

@@ -588,6 +588,10 @@ type Constraint struct {
Weight int // Soft constraints can vary the weight
}
// String returns the constraint rendered as the space-separated
// triple "LTarget Operand RTarget".
func (c *Constraint) String() string {
	parts := []interface{}{c.LTarget, c.Operand, c.RTarget}
	return fmt.Sprintf("%s %s %s", parts...)
}
const (
AllocStatusPending = "pending"
AllocStatusInit = "initializing"
@@ -652,19 +656,52 @@ type AllocMetric struct {
// ClassExhausted is the number of nodes exhausted by class
ClassExhausted map[string]int
// Preemptions is the number of preemptions considered.
// This indicates a relatively busy fleet if high.
Preemptions int
// Scores is the scores of the final few nodes remaining
// for placement. The top score is typically selected.
Scores map[string]int
Scores map[string]float64
// AllocationTime is a measure of how long the allocation
// attempt took. This can affect performance and SLAs.
AllocationTime time.Duration
}
// EvaluateNode increments the count of nodes that were considered
// during allocation placement.
func (a *AllocMetric) EvaluateNode() {
	a.NodesEvaluated++
}
// FilterNode records that a node was filtered out of consideration,
// optionally attributing the filtering to the node's class and to the
// constraint (as a string) that rejected it. Both the node and the
// constraint may be empty/nil. The per-class and per-constraint maps
// are lazily initialized so the zero value of AllocMetric is usable.
func (a *AllocMetric) FilterNode(node *Node, constraint string) {
	a.NodesFiltered++
	if node != nil && node.NodeClass != "" {
		if a.ClassFiltered == nil {
			a.ClassFiltered = make(map[string]int)
		}
		a.ClassFiltered[node.NodeClass]++
	}
	if constraint != "" {
		if a.ConstraintFiltered == nil {
			a.ConstraintFiltered = make(map[string]int)
		}
		a.ConstraintFiltered[constraint]++
	}
}
// ExhaustedNode records that a node was exhausted (could not fit the
// proposed allocations), attributing the exhaustion to the node's
// class when one is set. The per-class map is lazily initialized so
// the zero value of AllocMetric is usable.
func (a *AllocMetric) ExhaustedNode(node *Node) {
	a.NodesExhausted++
	if node != nil && node.NodeClass != "" {
		if a.ClassExhausted == nil {
			a.ClassExhausted = make(map[string]int)
		}
		a.ClassExhausted[node.NodeClass]++
	}
}
// ScoreNode stores the placement score computed for the given node,
// keyed by the node's ID. The Scores map is created on first use so
// the zero value of AllocMetric is usable.
func (a *AllocMetric) ScoreNode(node *Node, score float64) {
	if a.Scores == nil {
		a.Scores = map[string]float64{}
	}
	a.Scores[node.ID] = score
}
const (
EvalStatusPending = "pending"
EvalStatusComplete = "complete"

View File

@@ -55,6 +55,7 @@ func (iter *StaticIterator) Next() *structs.Node {
offset := iter.offset
iter.offset += 1
iter.seen += 1
iter.ctx.Metrics().EvaluateNode()
return iter.nodes[offset]
}
@@ -111,6 +112,7 @@ func (iter *DriverIterator) Next() *structs.Node {
if iter.hasDrivers(option) {
return option
}
iter.ctx.Metrics().FilterNode(option, "missing drivers")
}
}
@@ -177,6 +179,7 @@ func (iter *ConstraintIterator) Reset() {
func (iter *ConstraintIterator) meetsConstraints(option *structs.Node) bool {
for _, constraint := range iter.constraints {
if !iter.meetsConstraint(constraint, option) {
iter.ctx.Metrics().FilterNode(option, constraint.String())
return false
}
}

View File

@@ -167,6 +167,7 @@ func (iter *BinPackIterator) Next() *RankedNode {
// Check if these allocations fit, if they do not, simply skip this node
fit, util, _ := structs.AllocsFit(option.Node, proposed)
if !fit {
iter.ctx.Metrics().ExhaustedNode(option.Node)
continue
}
@@ -177,6 +178,7 @@ func (iter *BinPackIterator) Next() *RankedNode {
// Score the fit normally otherwise
option.Score = structs.ScoreFit(option.Node, util)
iter.ctx.Metrics().ScoreNode(option.Node, option.Score)
return option
}
}

View File

@@ -2,6 +2,7 @@ package scheduler
import (
"math"
"time"
"github.com/hashicorp/nomad/nomad/structs"
)
@@ -81,6 +82,7 @@ func (s *ServiceStack) Select(tg *structs.TaskGroup) (*RankedNode, *structs.Reso
// Reset the max selector and context
s.maxScore.Reset()
s.ctx.Reset()
start := time.Now()
// Collect the constraints, drivers and resources required by each
// sub-task to aggregate the TaskGroup totals
@@ -99,6 +101,10 @@ func (s *ServiceStack) Select(tg *structs.TaskGroup) (*RankedNode, *structs.Reso
s.taskGroupConstraint.SetConstraints(constr)
s.binPack.SetResources(size)
// Return the node with the max score
return s.maxScore.Next(), size
// Find the node with the max score
option := s.maxScore.Next()
// Store the compute time
s.ctx.Metrics().AllocationTime = time.Since(start)
return option, size
}