Fix some comments and lint warnings, remove unused method
@@ -294,46 +294,6 @@ func (s *GenericScheduler) process() (bool, error) {
     return true, nil
 }
 
-// filterCompleteAllocs filters allocations that are terminal and should be
-// re-placed.
-func (s *GenericScheduler) filterCompleteAllocs(allocs []*structs.Allocation) []*structs.Allocation {
-    filter := func(a *structs.Allocation) bool {
-        if s.batch {
-            // Allocs from batch jobs should be filtered when the desired status
-            // is terminal and the client did not finish or when the client
-            // status is failed so that they will be replaced. If they are
-            // complete but not failed, they shouldn't be replaced.
-            switch a.DesiredStatus {
-            case structs.AllocDesiredStatusStop, structs.AllocDesiredStatusEvict:
-                return !a.RanSuccessfully()
-            default:
-            }
-
-            switch a.ClientStatus {
-            case structs.AllocClientStatusFailed:
-                return true
-            default:
-                return false
-            }
-        }
-
-        // Filter terminal, non batch allocations
-        return a.TerminalStatus()
-    }
-
-    n := len(allocs)
-    for i := 0; i < n; i++ {
-        if filter(allocs[i]) {
-            // Remove the allocation
-            allocs[i], allocs[n-1] = allocs[n-1], nil
-            i--
-            n--
-        }
-    }
-
-    return allocs[:n]
-}
-
 // computeJobAllocs is used to reconcile differences between the job,
 // existing allocations and node status to update the allocations.
 func (s *GenericScheduler) computeJobAllocs() error {
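The bulk of the commit is the removal above: filterCompleteAllocs had no remaining callers. Its in-place swap-and-truncate filter is still a useful pattern; the following is a minimal, self-contained Go sketch of the same technique with generic names, not Nomad code. Note the swap does not preserve element order.

    package main

    import "fmt"

    // filterInPlace removes every element for which keep returns false,
    // without allocating a second slice: a rejected element is swapped
    // with the last live element, the vacated tail slot is zeroed, and
    // the live region shrinks by one.
    func filterInPlace(xs []int, keep func(int) bool) []int {
        n := len(xs)
        for i := 0; i < n; i++ {
            if !keep(xs[i]) {
                xs[i], xs[n-1] = xs[n-1], 0 // zero the vacated tail slot
                i--                         // re-examine the swapped-in element
                n--
            }
        }
        return xs[:n]
    }

    func main() {
        xs := []int{1, 2, 3, 4, 5, 6}
        fmt.Println(filterInPlace(xs, func(x int) bool { return x%2 == 0 })) // [6 2 4]
    }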
@@ -473,7 +433,7 @@ func (s *GenericScheduler) computePlacements(destructive, place []placementResul
     s.plan.AppendUpdate(prevAllocation, structs.AllocDesiredStatusStop, stopPrevAllocDesc, "")
 }
 
-    // Setup node weights for replacement allocations
+    // Compute penalty nodes for rescheduled allocs
     selectOptions := &SelectOptions{}
     if prevAllocation != nil {
         var penaltyNodes []string
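The reworded comment above describes what the code actually does: it computes penalty nodes, meaning nodes a rescheduled allocation already ran and failed on, which the ranker should down-weight when placing the replacement. A stand-in sketch of that idea, with hypothetical types that are not Nomad's:

    package main

    import "fmt"

    // rescheduleEvent records one earlier placement attempt.
    type rescheduleEvent struct{ PrevNodeID string }

    // allocation is a stand-in for the previous (failed) allocation.
    type allocation struct {
        NodeID string
        Events []rescheduleEvent
    }

    // penaltyNodes gathers every node the failed allocation, or one of
    // its earlier reschedule attempts, ran on.
    func penaltyNodes(prev *allocation) []string {
        nodes := []string{prev.NodeID}
        for _, ev := range prev.Events {
            nodes = append(nodes, ev.PrevNodeID)
        }
        return nodes
    }

    func main() {
        prev := &allocation{NodeID: "node-c", Events: []rescheduleEvent{{"node-a"}, {"node-b"}}}
        fmt.Println(penaltyNodes(prev)) // [node-c node-a node-b]
    }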
@@ -620,7 +620,7 @@ func (a *allocReconciler) computePlacements(group *structs.TaskGroup,
         return nil
     }
     var place []allocPlaceResult
-    // add rescheduled alloc placement results
+    // Add rescheduled placement results
     for _, alloc := range reschedule {
         place = append(place, allocPlaceResult{
             name:          alloc.Name,
@@ -632,7 +632,7 @@ func (a *allocReconciler) computePlacements(group *structs.TaskGroup,
             break
         }
     }
-    // add remaining
+    // Add remaining placement results
     if existing < group.Count {
         for _, name := range nameIndex.Next(uint(group.Count - existing)) {
             place = append(place, allocPlaceResult{
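The two hunks above only recapitalize comments; the surrounding logic first queues a placement for every rescheduled alloc, then tops up with fresh names until the group's desired count is reached. A stand-in sketch under those assumptions (names are hypothetical, not Nomad's internals):

    package main

    import "fmt"

    // placements queues one placement per rescheduled alloc (reusing its
    // name), then fills the gap between existing and desired count with
    // freshly generated names.
    func placements(rescheduled []string, existing, count int, nextName func() string) []string {
        var place []string
        // Add rescheduled placement results.
        place = append(place, rescheduled...)
        // Add remaining placement results up to the desired count.
        for i := existing; i < count; i++ {
            place = append(place, nextName())
        }
        return place
    }

    func main() {
        i := 0
        next := func() string { i++; return fmt.Sprintf("web.%d", i) }
        fmt.Println(placements([]string{"web.0"}, 2, 4, next)) // [web.0 web.1 web.2]
    }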
@@ -266,7 +266,7 @@ func (a allocSet) filterByRescheduleable(isBatch bool, reschedulePolicy *structs
         }
     }
     // Delete these from rescheduleable allocs
-    for allocId, _ := range rescheduledPrevAllocs {
+    for allocId := range rescheduledPrevAllocs {
         delete(reschedule, allocId)
     }
     return
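The lint fix above drops the redundant blank identifier from the map range: when only the key is needed, the one-variable form is idiomatic, and gofmt -s along with common linters flags the two-variable form. A small runnable illustration:

    package main

    import "fmt"

    func main() {
        seen := map[string]bool{"a": true, "b": true}

        // Flagged by linters: the second variable is discarded anyway.
        for k, _ := range seen {
            _ = k
        }

        // Idiomatic: a one-variable range over a map yields just the key.
        for k := range seen {
            fmt.Println(k)
        }
    }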
@@ -511,7 +511,7 @@ func inplaceUpdate(ctx Context, eval *structs.Evaluation, job *structs.Job,
         allocInPlace, "")
 
     // Attempt to match the task group
-    option, _ := stack.Select(update.TaskGroup, nil) // This select only looks at one node so we don't pass any node weight options
+    option, _ := stack.Select(update.TaskGroup, nil) // This select only looks at one node so we don't pass selectOptions
 
     // Pop the allocation
     ctx.Plan().PopUpdate(update.Alloc)
@@ -767,7 +767,7 @@ func genericAllocUpdateFn(ctx Context, stack Stack, evalID string) allocUpdateTy
     ctx.Plan().AppendUpdate(existing, structs.AllocDesiredStatusStop, allocInPlace, "")
 
     // Attempt to match the task group
-    option, _ := stack.Select(newTG, nil) // This select only looks at one node so we don't pass any node weight options
+    option, _ := stack.Select(newTG, nil) // This select only looks at one node so we don't pass selectOptions
 
     // Pop the allocation
     ctx.Plan().PopUpdate(existing)
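Both hunks above reword the same comment: Select here evaluates exactly one node, so penalty-node options could not change the outcome and the caller passes nil instead of building options. A hypothetical sketch of that nil-means-no-options convention (types and functions below are stand-ins, not Nomad's Select):

    package main

    import "fmt"

    // selectOptions is a stand-in for per-selection knobs such as
    // penalty nodes.
    type selectOptions struct{ PenaltyNodes []string }

    // selectNode treats a nil options pointer as "no options", so callers
    // with nothing to configure can simply pass nil.
    func selectNode(candidates []string, opts *selectOptions) string {
        if opts == nil {
            opts = &selectOptions{} // nil means "no options"
        }
        penalized := map[string]bool{}
        for _, n := range opts.PenaltyNodes {
            penalized[n] = true
        }
        for _, c := range candidates {
            if !penalized[c] {
                return c
            }
        }
        return ""
    }

    func main() {
        // Single-candidate case: options cannot change the outcome, pass nil.
        fmt.Println(selectNode([]string{"node-a"}, nil)) // node-a
    }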