Mirror of https://github.com/kemko/nomad.git, synced 2026-01-01 16:05:42 +03:00
command: monitor output updates
command/monitor.go

@@ -23,6 +23,7 @@ type evalState struct {
 	status string
 	desc   string
 	node   string
+	job    string
 	allocs map[string]*allocState
 	wait   time.Duration
 	index  uint64
@@ -36,6 +37,7 @@ type allocState struct {
 	desired     string
 	desiredDesc string
 	client      string
+	clientDesc  string
 	index       uint64

 	// full is the allocation struct with full details. This
@@ -91,6 +93,17 @@ func (m *monitor) update(update *evalState) {
 		m.state = update
 	}()

+	// Check if the evaluation was triggered by a node
+	if existing.node == "" && update.node != "" {
+		m.ui.Output(fmt.Sprintf("Evaluation triggered by node %q",
+			update.node))
+	}
+
+	// Check if the evaluation was triggered by a job
+	if existing.job == "" && update.job != "" {
+		m.ui.Output(fmt.Sprintf("Evaluation triggered by job %q", update.job))
+	}
+
 	// Check the allocations
 	for allocID, alloc := range update.allocs {
 		if existing, ok := existing.allocs[allocID]; !ok {
@@ -101,6 +114,11 @@ func (m *monitor) update(update *evalState) {
 				m.ui.Output(fmt.Sprintf("Scheduling error for group %q (%s)",
 					alloc.group, alloc.desiredDesc))

+				// Log the client status, if any provided
+				if alloc.clientDesc != "" {
+					m.ui.Output("Client reported status: " + alloc.clientDesc)
+				}
+
 				// Generate a more descriptive error for why the allocation
 				// failed and dump it to the screen
 				if alloc.full != nil {
@@ -125,8 +143,8 @@ func (m *monitor) update(update *evalState) {
 			case existing.client != alloc.client:
 				// Allocation status has changed
 				m.ui.Output(fmt.Sprintf(
-					"Allocation %q status changed: %q -> %q",
-					alloc.id, existing.client, alloc.client))
+					"Allocation %q status changed: %q -> %q (%s)",
+					alloc.id, existing.client, alloc.client, alloc.clientDesc))
 			}
 		}
 	}
@@ -138,18 +156,6 @@ func (m *monitor) update(update *evalState) {
 		m.ui.Output(fmt.Sprintf("Evaluation status changed: %q -> %q",
 			existing.status, update.status))
 	}
-
-	// Check if the wait time is different
-	if existing.wait == 0 && update.wait != 0 {
-		m.ui.Output(fmt.Sprintf("Evaluation delay is %s",
-			update.wait))
-	}
-
-	// Check if the evaluation was triggered by a node
-	if existing.node == "" && update.node != "" {
-		m.ui.Output(fmt.Sprintf("Evaluation triggered by node %q",
-			update.node))
-	}
 }

 // monitor is used to start monitoring the given evaluation ID. It
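For context, the hunks above move the one-shot trigger logging to the top of update(): each check fires only when a field transitions from empty to set between the previous snapshot and the new one, so each trigger line prints at most once per monitor lifetime. A minimal standalone sketch of that compare-and-log pattern (snapshot, logTransitions, and logf are illustrative names, not from the commit):

    package main

    import "fmt"

    // snapshot mirrors the node/job fields that update() compares.
    type snapshot struct{ node, job string }

    // logTransitions prints a message only on an empty -> non-empty
    // transition, which is what makes each trigger line appear once.
    func logTransitions(prev, next snapshot, logf func(format string, args ...interface{})) {
    	if prev.node == "" && next.node != "" {
    		logf("Evaluation triggered by node %q", next.node)
    	}
    	if prev.job == "" && next.job != "" {
    		logf("Evaluation triggered by job %q", next.job)
    	}
    }

    func main() {
    	logTransitions(snapshot{}, snapshot{job: "job1"}, func(f string, a ...interface{}) {
    		fmt.Printf(f+"\n", a...)
    	})
    	// Prints: Evaluation triggered by job "job1"
    }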
@@ -181,6 +187,7 @@ func (m *monitor) monitor(evalID string) int {
 			status: eval.Status,
 			desc:   eval.StatusDescription,
 			node:   eval.NodeID,
+			job:    eval.JobID,
 			allocs: make(map[string]*allocState),
 			wait:   eval.Wait,
 			index:  eval.CreateIndex,
@@ -202,6 +209,7 @@ func (m *monitor) monitor(evalID string) int {
 				desired:     alloc.DesiredStatus,
 				desiredDesc: alloc.DesiredDescription,
 				client:      alloc.ClientStatus,
+				clientDesc:  alloc.ClientDescription,
 				index:       alloc.CreateIndex,
 			}

@@ -231,8 +239,15 @@ func (m *monitor) monitor(evalID string) int {
 			continue
 		}

-		// Monitor the next eval, if it exists.
+		// Monitor the next eval in the chain, if present
 		if eval.NextEval != "" {
+			m.ui.Info(fmt.Sprintf(
+				"Monitoring next evaluation %q in %s",
+				eval.NextEval, eval.Wait))
+
+			// Skip some unnecessary polling
+			time.Sleep(eval.Wait)
+
 			m.init()
 			return m.monitor(eval.NextEval)
 		}
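The chained-evaluation hunk above changes monitor() so that when an eval names a successor (eval.NextEval), the monitor announces it, sleeps through the eval's wait period instead of polling across it, resets its printed state with m.init(), and recurses into m.monitor for the next ID. The same walk written as a plain loop, as a sketch only — followChain and fetch are hypothetical stand-ins for the recursive call and the API lookup:

    package main

    import "time"

    // followChain walks eval IDs until one has no successor. fetch is a
    // hypothetical stand-in for the eval lookup monitor() performs.
    func followChain(id string, fetch func(id string) (next string, wait time.Duration)) {
    	for id != "" {
    		next, wait := fetch(id)
    		if next == "" {
    			return // end of the chain; keep monitoring this eval
    		}
    		time.Sleep(wait) // skip the unnecessary polling, as the commit does
    		id = next
    	}
    }

    func main() {
    	evals := map[string]string{"a": "b", "b": ""}
    	followChain("a", func(id string) (string, time.Duration) {
    		return evals[id], 10 * time.Millisecond
    	})
    }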
@@ -257,6 +272,14 @@ func dumpAllocStatus(ui cli.Ui, alloc *api.Allocation) {
 		alloc.ID, alloc.ClientStatus,
 		alloc.Metrics.NodesFiltered, alloc.Metrics.NodesEvaluated))

+	// Print filter info
+	for class, num := range alloc.Metrics.ClassFiltered {
+		ui.Output(fmt.Sprintf(" * Class %q filtered %d nodes", class, num))
+	}
+	for cs, num := range alloc.Metrics.ConstraintFiltered {
+		ui.Output(fmt.Sprintf(" * Constraint %q filtered %d nodes", cs, num))
+	}
+
 	// Print exhaustion info
 	if ne := alloc.Metrics.NodesExhausted; ne > 0 {
 		ui.Output(fmt.Sprintf(" * Resources exhausted on %d nodes", ne))
@@ -267,12 +290,4 @@ func dumpAllocStatus(ui cli.Ui, alloc *api.Allocation) {
 	for dim, num := range alloc.Metrics.DimensionExhausted {
 		ui.Output(fmt.Sprintf(" * Dimension %q exhausted on %d nodes", dim, num))
 	}
-
-	// Print filter info
-	for class, num := range alloc.Metrics.ClassFiltered {
-		ui.Output(fmt.Sprintf(" * Class %q filtered %d nodes", class, num))
-	}
-	for cs, num := range alloc.Metrics.ConstraintFiltered {
-		ui.Output(fmt.Sprintf(" * Constraint %q filtered %d nodes", cs, num))
-	}
 }
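The two dumpAllocStatus hunks reorder the output so filter counts print before exhaustion counts. Every metrics field the function reads is visible in the diff; collected in one place for reference (a sketch inferred solely from the calls above — the real api.AllocationMetrics struct may carry more fields):

    // Sketch of the metrics fields dumpAllocStatus consumes.
    type allocationMetrics struct {
    	NodesEvaluated     int
    	NodesFiltered      int
    	ClassFiltered      map[string]int // node class -> nodes filtered
    	ConstraintFiltered map[string]int // constraint -> nodes filtered
    	NodesExhausted     int
    	DimensionExhausted map[string]int // resource dimension -> nodes exhausted
    }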
command/monitor_test.go

@@ -14,22 +14,31 @@ func TestMonitor_Update_Eval(t *testing.T) {
 	ui := new(cli.MockUi)
 	mon := newMonitor(ui, nil)

+	// Evals triggered by jobs log
 	state := &evalState{
 		status: structs.EvalStatusPending,
-		node:   "node1",
-		wait:   10 * time.Second,
-		index:  2,
+		job:    "job1",
 	}
 	mon.update(state)

 	// Logs were output
 	out := ui.OutputWriter.String()
+	if !strings.Contains(out, "job1") {
+		t.Fatalf("missing job\n\n%s", out)
+	}
+	ui.OutputWriter.Reset()
+	mon.init()
+
+	// Evals trigerred by nodes log
+	state = &evalState{
+		status: structs.EvalStatusPending,
+		node:   "node1",
+	}
+	mon.update(state)
+
+	out = ui.OutputWriter.String()
 	if !strings.Contains(out, "node1") {
 		t.Fatalf("missing node\n\n%s", out)
 	}
-	if !strings.Contains(out, "10s") {
-		t.Fatalf("missing eval wait\n\n%s", out)
-	}
-
 	// Transition to pending should not be logged
 	if strings.Contains(out, structs.EvalStatusPending) {
@@ -47,8 +56,6 @@ func TestMonitor_Update_Eval(t *testing.T) {
 	state = &evalState{
 		status: structs.EvalStatusComplete,
 		node:   "node1",
-		wait:   10 * time.Second,
-		index:  3,
 	}
 	mon.update(state)
 	out = ui.OutputWriter.String()
@@ -139,6 +146,8 @@ func TestMonitor_Update_SchedulingFailure(t *testing.T) {
 			group:       "group2",
 			desired:     structs.AllocDesiredStatusFailed,
+			desiredDesc: "something failed",
 			client:      structs.AllocClientStatusFailed,
+			clientDesc:  "client failed",
 			index:       1,

 	// Attach the full failed allocation
@@ -169,7 +178,10 @@ func TestMonitor_Update_SchedulingFailure(t *testing.T) {
 		t.Fatalf("missing failure\n\n%s", out)
 	}
 	if !strings.Contains(out, "something failed") {
-		t.Fatalf("missing reason\n\n%s", out)
+		t.Fatalf("missing desired desc\n\n%s", out)
 	}
+	if !strings.Contains(out, "client failed") {
+		t.Fatalf("missing client desc\n\n%s", out)
+	}

 	// Check that the allocation details were dumped
command/run.go

@@ -108,7 +108,8 @@ func (c *RunCommand) Run(args []string) int {

 	// Check if we should enter monitor mode
 	if detach {
-		c.Ui.Output(evalID)
+		c.Ui.Output("Job registration successful")
+		c.Ui.Output("Evaluation ID: " + evalID)
 		return 0
 	}

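Net effect of the run.go hunk: with -detach the command now prints a label plus the evaluation ID instead of the bare ID, then exits. The non-detach path is not shown in this diff, but from the monitor code and tests above it presumably hands off to the blocking monitor roughly like this (a sketch, not the commit's code):

    // Hypothetical fall-through after the detach check; newMonitor and
    // monitor(evalID) are the functions exercised in the hunks above.
    mon := newMonitor(c.Ui, client)
    return mon.monitor(evalID)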