Test for rescheduling when there are canaries
committed by Preetha Appan
parent ff7b1bebcc
commit 588bf68d45
@@ -1818,6 +1818,113 @@ func TestReconciler_RescheduleNow_EvalIDMatch(t *testing.T) {
    assertPlacementsAreRescheduled(t, 1, r.place)
}

// Tests rescheduling failed service allocations when there are canaries
func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) {
    require := require.New(t)

    // Set desired count to 5
    job := mock.Job()
    job.TaskGroups[0].Count = 5
    tgName := job.TaskGroups[0].Name
    now := time.Now()

    // Set up reschedule policy and update stanza
    job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
        Attempts:      1,
        Interval:      24 * time.Hour,
        Delay:         5 * time.Second,
        DelayFunction: "",
        MaxDelay:      1 * time.Hour,
        Unlimited:     false,
    }
    job.TaskGroups[0].Update = canaryUpdate

    job2 := job.Copy()
    job2.Version++

    d := structs.NewDeployment(job2)
    d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
    s := &structs.DeploymentState{
        DesiredCanaries: 2,
        DesiredTotal:    5,
    }
    d.TaskGroups[job.TaskGroups[0].Name] = s

    // Create 5 existing allocations
    var allocs []*structs.Allocation
    for i := 0; i < 5; i++ {
        alloc := mock.Alloc()
        alloc.Job = job
        alloc.JobID = job.ID
        alloc.NodeID = uuid.Generate()
        alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
        allocs = append(allocs, alloc)
        alloc.ClientStatus = structs.AllocClientStatusRunning
    }

    // Mark three as failed
    allocs[0].ClientStatus = structs.AllocClientStatusFailed

    // Mark one of them as already rescheduled once
    allocs[0].RescheduleTracker = &structs.RescheduleTracker{Events: []*structs.RescheduleEvent{
        {RescheduleTime: time.Now().Add(-1 * time.Hour).UTC().UnixNano(),
            PrevAllocID: uuid.Generate(),
            PrevNodeID:  uuid.Generate(),
        },
    }}
    allocs[1].TaskStates = map[string]*structs.TaskState{tgName: {State: "start",
        StartedAt:  now.Add(-1 * time.Hour),
        FinishedAt: now.Add(-10 * time.Second)}}
    allocs[1].ClientStatus = structs.AllocClientStatusFailed

    // Mark a third one as failed
    allocs[4].ClientStatus = structs.AllocClientStatusFailed

    // Create 2 canary allocations
    for i := 0; i < 2; i++ {
        alloc := mock.Alloc()
        alloc.Job = job
        alloc.JobID = job.ID
        alloc.NodeID = uuid.Generate()
        alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
        alloc.ClientStatus = structs.AllocClientStatusRunning
        alloc.DeploymentID = d.ID
        alloc.DeploymentStatus = &structs.AllocDeploymentStatus{
            Canary:  true,
            Healthy: helper.BoolToPtr(false),
        }
        s.PlacedCanaries = append(s.PlacedCanaries, alloc.ID)
        allocs = append(allocs, alloc)
    }

    reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job2, d, allocs, nil, "")
    r := reconciler.Compute()

    // Verify that no follow up evals were created
    evals := r.desiredFollowupEvals[tgName]
    require.Nil(evals)

    // Verify that two rescheduled allocs were placed
    assertResults(t, r, &resultExpectation{
        createDeployment:  nil,
        deploymentUpdates: nil,
        place:             2,
        inplace:           0,
        stop:              0,
        desiredTGUpdates: map[string]*structs.DesiredUpdates{
            job.TaskGroups[0].Name: {
                Place:  2,
                Ignore: 5,
            },
        },
    })

    // Rescheduled allocs should have previous allocs
    assertNamesHaveIndexes(t, intRange(1, 1, 4, 4), placeResultsToNames(r.place))
    assertPlaceResultsHavePreviousAllocs(t, 2, r.place)
    assertPlacementsAreRescheduled(t, 2, r.place)
}

// Tests that failed service allocations which were already rescheduled won't be rescheduled again
func TestReconciler_DontReschedule_PreviouslyRescheduled(t *testing.T) {
    // Set desired count to 5
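
Note: the canaryUpdate value assigned to job.TaskGroups[0].Update above is a shared test fixture defined elsewhere in reconcile_test.go and is not part of this hunk. As a rough, assumed sketch only (the field values are illustrative, not the commit's actual definition), an update strategy that forces canary deployments would look something like:

// Assumed sketch of a canary-style update strategy fixture; the real
// canaryUpdate in reconcile_test.go may use different field values.
var canaryUpdate = &structs.UpdateStrategy{
    Canary:          2,                 // request 2 canary allocations before promotion (assumed)
    MaxParallel:     2,                 // assumed value
    HealthCheck:     structs.UpdateStrategyHealthCheck_Checks,
    MinHealthyTime:  10 * time.Second,  // assumed value
    HealthyDeadline: 10 * time.Minute,  // assumed value
}

With Canary set to a non-zero value, the deployment created for job2 expects its DesiredCanaries placements to become healthy and be promoted before the remaining allocations are updated, which is why the test sets the deployment's status description to DeploymentStatusDescriptionRunningNeedsPromotion.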