From ca588f9ce097330888f8f97b1b4e21bca88cf6d0 Mon Sep 17 00:00:00 2001
From: Alex Dadgar
Date: Wed, 25 Apr 2018 15:03:30 -0700
Subject: [PATCH] clarify comment

---
 scheduler/reconcile_util.go | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/scheduler/reconcile_util.go b/scheduler/reconcile_util.go
index 264fcb1f0..b59fd8209 100644
--- a/scheduler/reconcile_util.go
+++ b/scheduler/reconcile_util.go
@@ -507,8 +507,11 @@ func (a *allocNameIndex) NextCanaries(n uint, existing, destructive allocSet) []
 	}
 
 	// We have exhausted the preferred and free set. Pick starting from n to
-	// n+remainder, to avoid overlapping where possible.
-	// indexes
+	// n+remainder, to avoid overlapping where possible. An example: the
+	// desired count is 3 and we want 5 canaries. The first 3 canaries can
+	// reuse indexes [0, 1, 2], but after that we prefer picking indexes
+	// [3, 4] so that we do not overlap. Once the canaries are promoted,
+	// these are also the allocations that would be shut down.
 	for i := uint(a.count); i < uint(a.count)+remainder; i++ {
 		name := structs.AllocName(a.job, a.taskGroup, i)
 		next = append(next, name)
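
To illustrate the behaviour the clarified comment describes, here is a minimal, standalone Go sketch. It is not the actual allocNameIndex.NextCanaries implementation; canaryIndexes is a hypothetical helper that assumes every preferred index is free and only shows how indexes are reused up to the desired count and then continued from count onward for the remainder.

package main

import "fmt"

// canaryIndexes returns the indexes chosen for n canaries when the task
// group's desired count is count, assuming all preferred indexes are free.
func canaryIndexes(count, n uint) []uint {
	var idx []uint
	// The first canaries reuse the indexes of the allocations they will
	// eventually replace: [0, count).
	for i := uint(0); i < count && uint(len(idx)) < n; i++ {
		idx = append(idx, i)
	}
	// Any remainder continues from count so the extra canaries do not
	// overlap existing names; after promotion, these are the allocations
	// that would be shut down.
	for i := count; uint(len(idx)) < n; i++ {
		idx = append(idx, i)
	}
	return idx
}

func main() {
	// Desired count 3, 5 canaries requested: prints [0 1 2 3 4].
	fmt.Println(canaryIndexes(3, 5))
}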