From e1fc445b4d724f9cd64a2a295c3fc55c1af722ea Mon Sep 17 00:00:00 2001
From: Diptanu Choudhury
Date: Tue, 5 Jul 2016 18:20:38 -0700
Subject: [PATCH] Added a test to ensure client alloc updates are happening properly

---
 nomad/state/state_store.go      |  7 ++--
 nomad/state/state_store_test.go | 63 +++++++++++++++++++++++++++++++--
 2 files changed, 66 insertions(+), 4 deletions(-)

diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go
index f0003326f..b14a96bd1 100644
--- a/nomad/state/state_store.go
+++ b/nomad/state/state_store.go
@@ -764,7 +764,8 @@ func (s *StateStore) Evals() (memdb.ResultIterator, error) {
 	return iter, nil
 }
 
-// UpdateAllocFromClient is used to update an allocation based on input
+// UpdateAllocsFromClient is used to update an allocation based on input
+
 // from a client. While the schedulers are the authority on the allocation for
 // most things, some updates are authoritative from the client. Specifically,
 // the desired state comes from the schedulers, while the actual state comes
@@ -1272,7 +1273,9 @@ func (s *StateStore) updateSummaryWithAlloc(newAlloc *structs.Allocation,
 		case structs.AllocClientStatusRunning:
 			tgSummary.Running += 1
 		}
-		tgSummary.Queued -= 1
+		if tgSummary.Queued > 0 {
+			tgSummary.Queued -= 1
+		}
 	} else if existingAlloc.ClientStatus != newAlloc.ClientStatus {
 		// Incrementing the clint of the bin of the current state
 		switch newAlloc.ClientStatus {
diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go
index 640df4094..9ce1c58e4 100644
--- a/nomad/state/state_store_test.go
+++ b/nomad/state/state_store_test.go
@@ -2383,13 +2383,72 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) {
 	if summary.Summary["web"].Queued != 0 || summary.Summary["web"].Starting != 1 || summary.Summary["web"].Complete != 2 {
 		t.Fatalf("bad job summary: %v", summary)
 	}
+}
 
-	job.TaskGroups[0].Count = 1
-	err = state.UpsertJob(1005, job)
+func TestJobSummary_UpdateClientStatus(t *testing.T) {
+	state := testStateStore(t)
+	alloc := mock.Alloc()
+	job := alloc.Job
+	job.TaskGroups[0].Count = 3
+
+	alloc2 := mock.Alloc()
+	alloc2.Job = job
+	alloc2.JobID = job.ID
+
+	alloc3 := mock.Alloc()
+	alloc3.Job = job
+	alloc3.JobID = job.ID
+
+	err := state.UpsertJob(1000, job)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
 
+	if err := state.UpsertAllocs(1001, []*structs.Allocation{alloc, alloc2, alloc3}); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	summary, _ := state.JobSummaryByID(job.ID)
+	if summary.Summary["web"].Queued != 0 || summary.Summary["web"].Starting != 3 {
+		t.Fatalf("bad job summary: %v", summary)
+	}
+
+	alloc4 := mock.Alloc()
+	alloc4.ID = alloc2.ID
+	alloc4.Job = alloc2.Job
+	alloc4.JobID = alloc2.JobID
+	alloc4.ClientStatus = structs.AllocClientStatusComplete
+
+	alloc5 := mock.Alloc()
+	alloc5.ID = alloc3.ID
+	alloc5.Job = alloc3.Job
+	alloc5.JobID = alloc3.JobID
+	alloc5.ClientStatus = structs.AllocClientStatusFailed
+
+	alloc6 := mock.Alloc()
+	alloc6.ID = alloc.ID
+	alloc6.Job = alloc.Job
+	alloc6.JobID = alloc.JobID
+	alloc6.ClientStatus = structs.AllocClientStatusRunning
+
+	if err := state.UpdateAllocsFromClient(1002, []*structs.Allocation{alloc4, alloc5, alloc6}); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	summary, _ = state.JobSummaryByID(job.ID)
+	if summary.Summary["web"].Queued != 0 || summary.Summary["web"].Running != 1 || summary.Summary["web"].Failed != 1 || summary.Summary["web"].Complete != 1 {
+		t.Fatalf("bad job summary: %v", summary)
+	}
+
+	alloc7 := mock.Alloc()
+	alloc7.Job = alloc.Job
+	alloc7.JobID = alloc.JobID
+
+	if err := state.UpsertAllocs(1003, []*structs.Allocation{alloc7}); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	summary, _ = state.JobSummaryByID(job.ID)
+	if summary.Summary["web"].Queued != 0 || summary.Summary["web"].Starting != 1 || summary.Summary["web"].Running != 1 || summary.Summary["web"].Failed != 1 || summary.Summary["web"].Complete != 1 {
+		t.Fatalf("bad job summary: %v", summary)
+	}
 }
 
 // setupNotifyTest takes a state store and a set of watch items, then creates