Fix typos

Alex Dadgar
2018-04-10 15:02:52 -07:00
committed by Preetha Appan
parent b8aa63a780
commit f95230028d
10 changed files with 39 additions and 39 deletions

View File

@@ -49,7 +49,7 @@ func (d *deploymentWatcherRaftShim) UpdateDeploymentAllocHealth(req *structs.App
return d.convertApplyErrors(fsmErrIntf, index, raftErr)
}
-func (d *deploymentWatcherRaftShim) UpdateAllocDesiredTransistion(req *structs.AllocUpdateDesiredTransitionRequest) (uint64, error) {
+func (d *deploymentWatcherRaftShim) UpdateAllocDesiredTransition(req *structs.AllocUpdateDesiredTransitionRequest) (uint64, error) {
fsmErrIntf, index, raftErr := d.apply(structs.AllocUpdateDesiredTransitionRequestType, req)
return d.convertApplyErrors(fsmErrIntf, index, raftErr)
}
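
Both shim methods above follow the same two-step pattern: apply submits the typed request to Raft and yields the FSM response, the applied index, and any Raft-level error, and convertApplyErrors folds those into a single (index, error) pair. A minimal sketch consistent with the call sites, assuming the FSM reports failures by returning an error as its response (not the verbatim implementation):

```go
// Fold an FSM response and a Raft error into one (index, error) result.
// An error returned by the FSM takes precedence, since Raft may succeed
// even when the state transition itself was rejected.
func (d *deploymentWatcherRaftShim) convertApplyErrors(applyResp interface{}, index uint64, err error) (uint64, error) {
	if applyResp != nil {
		if fsmErr, ok := applyResp.(error); ok && fsmErr != nil {
			return index, fsmErr
		}
	}
	return index, err
}
```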

View File

@@ -7,7 +7,7 @@ import (
"github.com/hashicorp/nomad/nomad/structs"
)
-// AllocUpdateBatcher is used to batch the updates to the desired transistions
+// AllocUpdateBatcher is used to batch the updates to the desired transitions
// of allocations and the creation of evals.
type AllocUpdateBatcher struct {
// batch is the batching duration
@@ -24,7 +24,7 @@ type AllocUpdateBatcher struct {
}
// NewAllocUpdateBatcher returns an AllocUpdateBatcher that uses the passed raft endpoints to
-// create the allocation desired transistion updates and new evaluations and
+// create the allocation desired transition updates and new evaluations and
// exits the batcher when the passed exit channel is closed.
func NewAllocUpdateBatcher(batchDuration time.Duration, raft DeploymentRaftEndpoints, ctx context.Context) *AllocUpdateBatcher {
b := &AllocUpdateBatcher{
@@ -38,7 +38,7 @@ func NewAllocUpdateBatcher(batchDuration time.Duration, raft DeploymentRaftEndpo
return b
}
-// CreateUpdate batches the allocation desired transistion update and returns a
+// CreateUpdate batches the allocation desired transition update and returns a
// future that tracks the completion of the request.
func (b *AllocUpdateBatcher) CreateUpdate(allocs map[string]*structs.DesiredTransition, eval *structs.Evaluation) *BatchFuture {
wrapper := &updateWrapper{
@@ -100,7 +100,7 @@ func (b *AllocUpdateBatcher) batcher() {
}
// Upsert the evals in a go routine
-go f.Set(b.raft.UpdateAllocDesiredTransistion(req))
+go f.Set(b.raft.UpdateAllocDesiredTransition(req))
// Reset the evals list and timer
evals = make(map[string]*structs.Evaluation)
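
The loop around the flush above (outside the diff context) is a standard Go batching pattern: accumulate work from a channel and commit it in one Raft call when a timer fires, handing every caller in the batch the same future. A compressed sketch under assumed names (workCh, the updateWrapper fields, evalsToSlice, and NewBatchFuture are illustrative, not the verbatim code):

```go
// Collect desired-transition updates and evals, then flush them in a
// single Raft transaction per batch window.
func (b *AllocUpdateBatcher) batcher() {
	var timerCh <-chan time.Time
	allocs := make(map[string]*structs.DesiredTransition)
	evals := make(map[string]*structs.Evaluation)
	future := NewBatchFuture()

	for {
		select {
		case <-b.ctx.Done():
			return

		case w := <-b.workCh:
			if timerCh == nil {
				timerCh = time.After(b.batch) // arm the flush lazily
			}
			for id, transition := range w.allocs {
				allocs[id] = transition
			}
			evals[w.e.DeploymentID] = w.e
			w.f <- future // every caller in this batch shares one future

		case <-timerCh:
			req := &structs.AllocUpdateDesiredTransitionRequest{
				Allocs: allocs,
				Evals:  evalsToSlice(evals), // assumed small helper
			}
			// Commit in a goroutine so a slow Raft apply doesn't
			// block the next batch from forming.
			go future.Set(b.raft.UpdateAllocDesiredTransition(req))

			// Reset state for the next batch window.
			allocs = make(map[string]*structs.DesiredTransition)
			evals = make(map[string]*structs.Evaluation)
			future = NewBatchFuture()
			timerCh = nil
		}
	}
}
```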

View File

@@ -23,10 +23,10 @@ const (
)
var (
-// allowRescheduleTransistion is the transistion that allows failed
+// allowRescheduleTransition is the transition that allows failed
// allocations part of a deployment to be rescheduled. We create a one off
// variable to avoid creating a new object for every request.
-allowRescheduleTransistion = &structs.DesiredTransition{
+allowRescheduleTransition = &structs.DesiredTransition{
Reschedule: helper.BoolToPtr(true),
}
)
@@ -34,7 +34,7 @@ var (
// deploymentTriggers are the set of functions required to trigger changes on
// behalf of a deployment
type deploymentTriggers interface {
-// createUpdate is used to create allocation desired transistion updates and
+// createUpdate is used to create allocation desired transition updates and
// an evaluation.
createUpdate(allocs map[string]*structs.DesiredTransition, eval *structs.Evaluation) (uint64, error)
@@ -360,12 +360,12 @@ func (w *deploymentWatcher) StopWatch() {
// deployment changes. Its function is to create evaluations to trigger the
// scheduler when more progress can be made, and to fail the deployment if it
// has failed, potentially rolling back the job. Progress can be made when an
-// allocation transistions to healthy, so we create an eval.
+// allocation transitions to healthy, so we create an eval.
func (w *deploymentWatcher) watch() {
// Get the deadline. This is likely a zero time to begin with but we need to
// handle the case that the deployment has already progressed and we are now
// just starting to watch it. This most likely would occur if there was a
-// leader transistion and we are now starting our watcher.
+// leader transition and we are now starting our watcher.
currentDeadline := getDeploymentProgressCutoff(w.getDeployment())
var deadlineTimer *time.Timer
if currentDeadline.IsZero() {
@@ -631,7 +631,7 @@ func (w *deploymentWatcher) createBatchedUpdate(allowReplacements []string, forI
if w.outstandingAllowReplacements == nil {
w.outstandingAllowReplacements = make(map[string]*structs.DesiredTransition, len(allowReplacements))
}
-w.outstandingAllowReplacements[allocID] = allowRescheduleTransistion
+w.outstandingAllowReplacements[allocID] = allowRescheduleTransition
}
if w.outstandingBatch || (forIndex < w.latestEval && len(allowReplacements) == 0) {
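
The watch() setup earlier in this file cuts off at the currentDeadline.IsZero() branch. That branch is the standard Go idiom for a timer that must start disarmed: create it, stop it, and drain the channel if it already fired, so a later Reset arms it cleanly. A sketch of that idiom as it would apply here (assumed continuation, shown for orientation):

```go
var deadlineTimer *time.Timer
if currentDeadline.IsZero() {
	// No progress deadline yet: start the timer disarmed. Stop returns
	// false if the timer already fired, so drain the channel to keep a
	// future Reset well-behaved.
	deadlineTimer = time.NewTimer(0)
	if !deadlineTimer.Stop() {
		<-deadlineTimer.C
	}
} else {
	deadlineTimer = time.NewTimer(time.Until(currentDeadline))
}
```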

View File

@@ -20,7 +20,7 @@ const (
LimitStateQueriesPerSecond = 100.0
// CrossDeploymentUpdateBatchDuration is the duration in which allocation
-// desired transistion and evaluation creation updates are batched across
+// desired transition and evaluation creation updates are batched across
// all deployment watchers before committing to Raft.
CrossDeploymentUpdateBatchDuration = 250 * time.Millisecond
)
@@ -48,9 +48,9 @@ type DeploymentRaftEndpoints interface {
// deployment
UpdateDeploymentAllocHealth(req *structs.ApplyDeploymentAllocHealthRequest) (uint64, error)
-// UpdateAllocDesiredTransistion is used to update the desired transistion
+// UpdateAllocDesiredTransition is used to update the desired transition
// for allocations.
-UpdateAllocDesiredTransistion(req *structs.AllocUpdateDesiredTransitionRequest) (uint64, error)
+UpdateAllocDesiredTransition(req *structs.AllocUpdateDesiredTransitionRequest) (uint64, error)
}
// Watcher is used to watch deployments and their allocations created
@@ -64,7 +64,7 @@ type Watcher struct {
queryLimiter *rate.Limiter
// updateBatchDuration is the duration to batch allocation desired
-// transistion and eval creation across all deployment watchers
+// transition and eval creation across all deployment watchers
updateBatchDuration time.Duration
// raft contains the set of Raft endpoints that can be used by the
@@ -78,7 +78,7 @@ type Watcher struct {
watchers map[string]*deploymentWatcher
// allocUpdateBatcher is used to batch the creation of evaluations and
-// allocation desired transistion updates
+// allocation desired transition updates
allocUpdateBatcher *AllocUpdateBatcher
// ctx and exitFn are used to cancel the watcher
@@ -357,7 +357,7 @@ func (w *Watcher) FailDeployment(req *structs.DeploymentFailRequest, resp *struc
return watcher.FailDeployment(req, resp)
}
-// createUpdate commits the given allocation desired transistion and evaluation
+// createUpdate commits the given allocation desired transition and evaluation
// to Raft but batches the commit with other calls.
func (w *Watcher) createUpdate(allocs map[string]*structs.DesiredTransition, eval *structs.Evaluation) (uint64, error) {
return w.allocUpdateBatcher.CreateUpdate(allocs, eval).Results()
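
The queryLimiter above is a token bucket from golang.org/x/time/rate, capped by LimitStateQueriesPerSecond. A minimal sketch of how such a limiter is typically consumed before each blocking state query (assumed call site; the real watcher threads its own context and state getters through):

```go
// Block until a token is available or shutdown is signalled, keeping
// state-store query load bounded across all deployment watchers.
if err := w.queryLimiter.Wait(w.ctx); err != nil {
	return err // context cancelled: the watcher is shutting down
}
// proceed with the state query for this deployment's allocations
```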

View File

@@ -714,8 +714,8 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) {
// Assert that we will get an update allocation call only once. This will
// verify that the watcher is batching allocation changes
-m1 := matchUpdateAllocDesiredTransistions([]string{d.ID})
-m.On("UpdateAllocDesiredTransistion", mocker.MatchedBy(m1)).Return(nil).Once()
+m1 := matchUpdateAllocDesiredTransitions([]string{d.ID})
+m.On("UpdateAllocDesiredTransition", mocker.MatchedBy(m1)).Return(nil).Once()
// Assert that we get a call to UpsertDeploymentStatusUpdate
c := &matchDeploymentStatusUpdateConfig{
@@ -787,7 +787,7 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) {
t.Fatal(err)
})
m.AssertCalled(t, "UpdateAllocDesiredTransistion", mocker.MatchedBy(m1))
m.AssertCalled(t, "UpdateAllocDesiredTransition", mocker.MatchedBy(m1))
// After we upsert the job version will go to 2. So use this to assert the
// original call happened.
@@ -907,8 +907,8 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) {
// Assert that we will get a createEvaluation call only once. This will
// verify that the watcher is batching allocation changes
-m1 := matchUpdateAllocDesiredTransistions([]string{d.ID})
-m.On("UpdateAllocDesiredTransistion", mocker.MatchedBy(m1)).Return(nil).Once()
+m1 := matchUpdateAllocDesiredTransitions([]string{d.ID})
+m.On("UpdateAllocDesiredTransition", mocker.MatchedBy(m1)).Return(nil).Once()
// Assert that we get a call to UpsertDeploymentStatusUpdate with roll back failed as the status
c := &matchDeploymentStatusUpdateConfig{
@@ -980,7 +980,7 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) {
t.Fatal(err)
})
m.AssertCalled(t, "UpdateAllocDesiredTransistion", mocker.MatchedBy(m1))
m.AssertCalled(t, "UpdateAllocDesiredTransition", mocker.MatchedBy(m1))
// verify that the job version hasn't changed after upsert
m.state.JobByID(nil, structs.DefaultNamespace, j.ID)
@@ -1024,8 +1024,8 @@ func TestWatcher_BatchAllocUpdates(t *testing.T) {
// Assert that we will get a createEvaluation call only once and it contains
// both deployments. This will verify that the watcher is batching
// allocation changes
-m1 := matchUpdateAllocDesiredTransistions([]string{d1.ID, d2.ID})
-m.On("UpdateAllocDesiredTransistion", mocker.MatchedBy(m1)).Return(nil).Once()
+m1 := matchUpdateAllocDesiredTransitions([]string{d1.ID, d2.ID})
+m.On("UpdateAllocDesiredTransition", mocker.MatchedBy(m1)).Return(nil).Once()
w.SetEnabled(true, m.state)
testutil.WaitForResult(func() (bool, error) { return 2 == len(w.watchers), nil },
@@ -1074,7 +1074,7 @@ func TestWatcher_BatchAllocUpdates(t *testing.T) {
t.Fatal(err)
})
m.AssertCalled(t, "UpdateAllocDesiredTransistion", mocker.MatchedBy(m1))
m.AssertCalled(t, "UpdateAllocDesiredTransition", mocker.MatchedBy(m1))
testutil.WaitForResult(func() (bool, error) { return 2 == len(w.watchers), nil },
func(err error) { assert.Equal(2, len(w.watchers), "Should have 2 deployments") })
}
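
All the m1 matchers in this test follow one testify pattern: mocker.MatchedBy wraps a predicate so an expectation only matches calls whose argument satisfies it, and the same matcher value can be reused for the later AssertCalled. A condensed, hypothetical example of the pattern in isolation:

```go
// Expect exactly one batched update carrying at least one allocation.
match := mocker.MatchedBy(func(req *structs.AllocUpdateDesiredTransitionRequest) bool {
	return len(req.Allocs) > 0 // hypothetical predicate
})
m.On("UpdateAllocDesiredTransition", match).Return(nil).Once()

// ... drive the watcher ...

m.AssertCalled(t, "UpdateAllocDesiredTransition", match)
```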

View File

@@ -39,14 +39,14 @@ func (m *mockBackend) nextIndex() uint64 {
return i
}
-func (m *mockBackend) UpdateAllocDesiredTransistion(u *structs.AllocUpdateDesiredTransitionRequest) (uint64, error) {
+func (m *mockBackend) UpdateAllocDesiredTransition(u *structs.AllocUpdateDesiredTransitionRequest) (uint64, error) {
m.Called(u)
i := m.nextIndex()
return i, m.state.UpdateAllocsDesiredTransitions(i, u.Allocs, u.Evals)
}
-// matchUpdateAllocDesiredTransistions is used to match an upsert request
-func matchUpdateAllocDesiredTransistions(deploymentIDs []string) func(update *structs.AllocUpdateDesiredTransitionRequest) bool {
+// matchUpdateAllocDesiredTransitions is used to match an upsert request
+func matchUpdateAllocDesiredTransitions(deploymentIDs []string) func(update *structs.AllocUpdateDesiredTransitionRequest) bool {
return func(update *structs.AllocUpdateDesiredTransitionRequest) bool {
if len(update.Evals) != len(deploymentIDs) {
return false
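
The matcher body is cut off by the diff context. For orientation only, a hypothetical matcher of the same shape (not the project's actual continuation) would verify that every eval in the request maps back to one of the expected deployments:

```go
// Hypothetical equivalent for illustration: accept the request iff the
// eval count matches and each eval belongs to an expected deployment.
func matchByDeploymentIDs(deploymentIDs []string) func(*structs.AllocUpdateDesiredTransitionRequest) bool {
	expected := make(map[string]struct{}, len(deploymentIDs))
	for _, id := range deploymentIDs {
		expected[id] = struct{}{}
	}
	return func(update *structs.AllocUpdateDesiredTransitionRequest) bool {
		if len(update.Evals) != len(deploymentIDs) {
			return false
		}
		for _, e := range update.Evals {
			if _, ok := expected[e.DeploymentID]; !ok {
				return false
			}
		}
		return true
	}
}
```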

View File

@@ -254,7 +254,7 @@ func (n *NodeDrainer) handleDeadlinedNodes(nodes []string) {
n.l.RUnlock()
n.batchDrainAllocs(forceStop)
-// Submit the node transistions in a sharded form to ensure a reasonable
+// Submit the node transitions in a sharded form to ensure a reasonable
// Raft transaction size.
for _, nodes := range partitionIds(defaultMaxIdsPerTxn, nodes) {
if _, err := n.raft.NodesDrainComplete(nodes); err != nil {
@@ -324,7 +324,7 @@ func (n *NodeDrainer) handleMigratedAllocs(allocs []*structs.Allocation) {
}
}
-// Submit the node transistions in a sharded form to ensure a reasonable
+// Submit the node transitions in a sharded form to ensure a reasonable
// Raft transaction size.
for _, nodes := range partitionIds(defaultMaxIdsPerTxn, done) {
if _, err := n.raft.NodesDrainComplete(nodes); err != nil {
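
partitionIds is the sharding helper both loops above lean on. A plausible sketch of its shape (assumed; the real helper lives elsewhere in the package) is simple chunking, which is what keeps each NodesDrainComplete call to a bounded Raft transaction:

```go
// Split ids into chunks of at most maxIdsPerTxn entries each.
func partitionIds(maxIdsPerTxn int, ids []string) [][]string {
	var partitions [][]string
	for len(ids) > maxIdsPerTxn {
		partitions = append(partitions, ids[:maxIdsPerTxn])
		ids = ids[maxIdsPerTxn:]
	}
	if len(ids) > 0 {
		partitions = append(partitions, ids)
	}
	return partitions
}
```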
@@ -374,9 +374,9 @@ func (n *NodeDrainer) batchDrainAllocs(allocs []*structs.Allocation) (uint64, er
func (n *NodeDrainer) drainAllocs(future *structs.BatchFuture, allocs []*structs.Allocation) {
// Compute the affected jobs and make the transition map
jobs := make(map[string]*structs.Allocation, 4)
-transistions := make(map[string]*structs.DesiredTransition, len(allocs))
+transitions := make(map[string]*structs.DesiredTransition, len(allocs))
for _, alloc := range allocs {
-transistions[alloc.ID] = &structs.DesiredTransition{
+transitions[alloc.ID] = &structs.DesiredTransition{
Migrate: helper.BoolToPtr(true),
}
jobs[alloc.JobID] = alloc
@@ -397,7 +397,7 @@ func (n *NodeDrainer) drainAllocs(future *structs.BatchFuture, allocs []*structs
// Commit this update via Raft
var finalIndex uint64
-for _, u := range partitionAllocDrain(defaultMaxIdsPerTxn, transistions, evals) {
+for _, u := range partitionAllocDrain(defaultMaxIdsPerTxn, transitions, evals) {
index, err := n.raft.AllocUpdateDesiredTransition(u.Transitions, u.Evals)
if err != nil {
future.Respond(0, err)
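
partitionAllocDrain applies the same cap to a mixed payload of transitions and evals. An assumed implementation, shown only to make the arithmetic concrete, that is consistent with the test in the next file (transitions fill each request first and both kinds count against one budget, so 3 transitions plus 3 evals at 2 items per transaction yields 3 requests):

```go
// Shard a drain update so each request carries at most maxIdsPerTxn
// items, packing transitions before evals.
func partitionAllocDrain(maxIdsPerTxn int, transitions map[string]*structs.DesiredTransition,
	evals []*structs.Evaluation) []*structs.AllocUpdateDesiredTransitionRequest {

	ids := make([]string, 0, len(transitions))
	for id := range transitions {
		ids = append(ids, id)
	}

	var requests []*structs.AllocUpdateDesiredTransitionRequest
	for len(ids) > 0 || len(evals) > 0 {
		req := &structs.AllocUpdateDesiredTransitionRequest{
			Allocs: make(map[string]*structs.DesiredTransition),
		}
		budget := maxIdsPerTxn
		for budget > 0 && len(ids) > 0 {
			req.Allocs[ids[0]] = transitions[ids[0]]
			ids = ids[1:]
			budget--
		}
		for budget > 0 && len(evals) > 0 {
			req.Evals = append(req.Evals, evals[0])
			evals = evals[1:]
			budget--
		}
		requests = append(requests, req)
	}
	return requests
}
```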

View File

@@ -13,9 +13,9 @@ func TestDrainer_PartitionAllocDrain(t *testing.T) {
maxIdsPerTxn := 2
require := require.New(t)
-transistions := map[string]*structs.DesiredTransition{"a": nil, "b": nil, "c": nil}
+transitions := map[string]*structs.DesiredTransition{"a": nil, "b": nil, "c": nil}
evals := []*structs.Evaluation{nil, nil, nil}
-requests := partitionAllocDrain(maxIdsPerTxn, transistions, evals)
+requests := partitionAllocDrain(maxIdsPerTxn, transitions, evals)
require.Len(requests, 3)
first := requests[0]

View File

@@ -824,7 +824,7 @@ func TestDrainer_AllTypes_Deadline_GarbageCollectedNode(t *testing.T) {
})
}
-// Test that transistions to force drain work.
+// Test that transitions to force drain work.
func TestDrainer_Batch_TransitionToForce(t *testing.T) {
t.Parallel()
require := require.New(t)

View File

@@ -476,7 +476,7 @@ func (n *Node) UpdateDrain(args *structs.NodeUpdateDrainRequest,
}
reply.NodeModifyIndex = index
-// If the node is transistioning to be eligible, create Node evaluations
+// If the node is transitioning to be eligible, create Node evaluations
// because there may be a System job registered that should be evaluated.
if node.SchedulingEligibility == structs.NodeSchedulingIneligible && args.MarkEligible && args.DrainStrategy == nil {
evalIDs, evalIndex, err := n.createNodeEvals(args.NodeID, index)
@@ -556,7 +556,7 @@ func (n *Node) UpdateEligibility(args *structs.NodeUpdateEligibilityRequest,
}
}
-// If the node is transistioning to be eligible, create Node evaluations
+// If the node is transitioning to be eligible, create Node evaluations
// because there may be a System job registered that should be evaluated.
if node.SchedulingEligibility == structs.NodeSchedulingIneligible && args.Eligibility == structs.NodeSchedulingEligible {
evalIDs, evalIndex, err := n.createNodeEvals(args.NodeID, index)
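
createNodeEvals, referenced by both branches above, enqueues one evaluation per job that touches the node so the scheduler reconsiders placements, System jobs included. A sketch of the evaluation such a helper would build (field choices are assumptions; EvalTriggerNodeUpdate is the trigger Nomad uses for node-driven evals):

```go
// One pending evaluation per affected job, attributed to the node update.
eval := &structs.Evaluation{
	ID:          uuid.Generate(),
	Namespace:   job.Namespace,
	Priority:    job.Priority,
	Type:        job.Type,
	TriggeredBy: structs.EvalTriggerNodeUpdate,
	JobID:       job.ID,
	NodeID:      args.NodeID,
	Status:      structs.EvalStatusPending,
}
```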