From 7a569715082d4996765134aee023232459cdef03 Mon Sep 17 00:00:00 2001 From: Luke Farnell Date: Mon, 7 Aug 2017 17:13:05 -0400 Subject: [PATCH] fixed all spelling mistakes for goreport --- api/fs.go | 2 +- api/jobs.go | 2 +- client/allocdir/task_dir_test.go | 2 +- client/driver/docker.go | 2 +- client/driver/docker_coordinator.go | 4 ++-- .../driver/logging/syslog_server_unix_test.go | 2 +- client/driver/structs/structs_default.go | 2 +- client/fingerprint/network_test.go | 6 ++--- client/gc.go | 2 +- client/task_runner.go | 2 +- client/task_runner_test.go | 2 +- command/agent/config.go | 4 ++-- command/agent/consul/client.go | 4 ++-- command/agent/consul/unit_test.go | 4 ++-- command/agent/fs_endpoint.go | 2 +- command/agent/fs_endpoint_test.go | 2 +- command/check.go | 2 +- command/fs.go | 2 +- command/job_dispatch.go | 6 ++--- command/job_history.go | 2 +- nomad/blocked_evals_test.go | 2 +- nomad/deployment_watcher_shims.go | 4 ++-- nomad/deploymentwatcher/deployment_watcher.go | 2 +- .../deploymentwatcher/deployments_watcher.go | 2 +- nomad/job_endpoint_test.go | 4 ++-- nomad/node_endpoint.go | 2 +- nomad/node_endpoint_test.go | 2 +- nomad/server.go | 4 ++-- nomad/state/state_store.go | 4 ++-- nomad/state/state_store_test.go | 24 +++++++++---------- nomad/structs/structs.go | 16 ++++++------- nomad/vault.go | 6 ++--- nomad/vault_test.go | 2 +- nomad/worker.go | 2 +- scheduler/reconcile.go | 6 ++--- scheduler/reconcile_test.go | 2 +- scheduler/reconcile_util.go | 4 ++-- scheduler/system_sched_test.go | 2 +- 38 files changed, 73 insertions(+), 73 deletions(-) diff --git a/api/fs.go b/api/fs.go index ec8551664..73ae71c97 100644 --- a/api/fs.go +++ b/api/fs.go @@ -50,7 +50,7 @@ func (c *Client) AllocFS() *AllocFS { } // getNodeClient returns a Client that will dial the node. 
If the QueryOptions -// is set, the function will ensure that it is initalized and that the Params +// is set, the function will ensure that it is initialized and that the Params // field is valid. func (a *AllocFS) getNodeClient(node *Node, allocID string, q **QueryOptions) (*Client, error) { if node.HTTPAddr == "" { diff --git a/api/jobs.go b/api/jobs.go index f89b9a5d6..a891ba084 100644 --- a/api/jobs.go +++ b/api/jobs.go @@ -709,7 +709,7 @@ type JobValidateResponse struct { // ValidationErrors is a list of validation errors ValidationErrors []string - // Error is a string version of any error that may have occured + // Error is a string version of any error that may have occurred Error string // Warnings contains any warnings about the given job. These may include diff --git a/client/allocdir/task_dir_test.go b/client/allocdir/task_dir_test.go index 6521b6b87..56c00999c 100644 --- a/client/allocdir/task_dir_test.go +++ b/client/allocdir/task_dir_test.go @@ -80,7 +80,7 @@ func TestTaskDir_EmbedDirs(t *testing.T) { exp := []string{filepath.Join(td.Dir, taskDest, file), filepath.Join(td.Dir, taskDest, subDirName, subFile)} for _, f := range exp { if _, err := os.Stat(f); os.IsNotExist(err) { - t.Fatalf("File %v not embeded: %v", f, err) + t.Fatalf("File %v not embedded: %v", f, err) } } } diff --git a/client/driver/docker.go b/client/driver/docker.go index 8c6a60ba2..5372abbb5 100644 --- a/client/driver/docker.go +++ b/client/driver/docker.go @@ -188,7 +188,7 @@ func NewDockerDriverConfig(task *structs.Task, env *env.TaskEnv) (*DockerDriverC return nil, err } - // Interpolate everthing that is a string + // Interpolate everything that is a string dconf.ImageName = env.ReplaceEnv(dconf.ImageName) dconf.Command = env.ReplaceEnv(dconf.Command) dconf.IpcMode = env.ReplaceEnv(dconf.IpcMode) diff --git a/client/driver/docker_coordinator.go b/client/driver/docker_coordinator.go index 30a97ae87..cfbdea8f2 100644 --- a/client/driver/docker_coordinator.go +++ 
b/client/driver/docker_coordinator.go @@ -26,7 +26,7 @@ var ( ) // pullFuture is a sharable future for retrieving a pulled images ID and any -// error that may have occured during the pull. +// error that may have occurred during the pull. type pullFuture struct { waitCh chan struct{} @@ -129,7 +129,7 @@ func GetDockerCoordinator(config *dockerCoordinatorConfig) *dockerCoordinator { } // PullImage is used to pull an image. It returns the pulled imaged ID or an -// error that occured during the pull +// error that occurred during the pull func (d *dockerCoordinator) PullImage(image string, authOptions *docker.AuthConfiguration, callerID string) (imageID string, err error) { // Get the future d.imageLock.Lock() diff --git a/client/driver/logging/syslog_server_unix_test.go b/client/driver/logging/syslog_server_unix_test.go index 2e39e9dbf..e031bf941 100644 --- a/client/driver/logging/syslog_server_unix_test.go +++ b/client/driver/logging/syslog_server_unix_test.go @@ -13,7 +13,7 @@ func TestSyslogServer_Start_Shutdown(t *testing.T) { t.Parallel() dir, err := ioutil.TempDir("", "sock") if err != nil { - t.Fatalf("Failed to create temporary direcotry: %v", err) + t.Fatalf("Failed to create temporary directory: %v", err) } sock := path.Join(dir, "socket") diff --git a/client/driver/structs/structs_default.go b/client/driver/structs/structs_default.go index c14afe407..d45599179 100644 --- a/client/driver/structs/structs_default.go +++ b/client/driver/structs/structs_default.go @@ -6,7 +6,7 @@ package structs // uses to put resource constraints and isolation on the user process. The // default implementation is empty. Platforms that support resource isolation // (e.g. Linux's Cgroups) should build their own platform-specific copy. This -// information is transmitted via RPC so it is not permissable to change the +// information is transmitted via RPC so it is not permissible to change the // API. 
type IsolationConfig struct { } diff --git a/client/fingerprint/network_test.go b/client/fingerprint/network_test.go index eaa638b56..62124a0f5 100644 --- a/client/fingerprint/network_test.go +++ b/client/fingerprint/network_test.go @@ -186,7 +186,7 @@ func TestNetworkFingerprint_basic(t *testing.T) { t.Fatal("Expected Network Resource to have a Device Name") } if net.MBits != 101 { - t.Fatalf("Expected Network Resource to have bandwith %d; got %d", 101, net.MBits) + t.Fatalf("Expected Network Resource to have bandwidth %d; got %d", 101, net.MBits) } } @@ -263,7 +263,7 @@ func TestNetworkFingerPrint_default_device(t *testing.T) { t.Fatal("Expected Network Resource to have a Device Name") } if net.MBits == 0 { - t.Fatal("Expected Network Resource to have a non-zero bandwith") + t.Fatal("Expected Network Resource to have a non-zero bandwidth") } } @@ -306,7 +306,7 @@ func TestNetworkFingerPrint_excludelo_down_interfaces(t *testing.T) { t.Fatal("Expected Network Resource to be eth0. Actual: ", net.Device) } if net.MBits == 0 { - t.Fatal("Expected Network Resource to have a non-zero bandwith") + t.Fatal("Expected Network Resource to have a non-zero bandwidth") } // Test the CIDR of the IPs diff --git a/client/gc.go b/client/gc.go index d6b58d777..cc2b92314 100644 --- a/client/gc.go +++ b/client/gc.go @@ -236,7 +236,7 @@ func (a *AllocGarbageCollector) MakeRoomFor(allocations []*structs.Allocation) e } } - // If the host has enough free space to accomodate the new allocations then + // If the host has enough free space to accommodate the new allocations then // we don't need to garbage collect terminated allocations if hostStats := a.statsCollector.Stats(); hostStats != nil { var availableForAllocations uint64 diff --git a/client/task_runner.go b/client/task_runner.go index 4e0ca6abd..7110cc1b7 100644 --- a/client/task_runner.go +++ b/client/task_runner.go @@ -159,7 +159,7 @@ type TaskRunner struct { persistLock sync.Mutex // persistedHash is the hash of the last 
persisted snapshot. It is used to - // detect if a new snapshot has to be writen to disk. + // detect if a new snapshot has to be written to disk. persistedHash []byte } diff --git a/client/task_runner_test.go b/client/task_runner_test.go index 7e5f1151d..0d5f17d69 100644 --- a/client/task_runner_test.go +++ b/client/task_runner_test.go @@ -718,7 +718,7 @@ func TestTaskRunner_RestartTask(t *testing.T) { t.Fatalf("Eighth Event was %v; want %v", ctx.upd.events[8].Type, structs.TaskStarted) } if ctx.upd.events[8].Type != structs.TaskKilling { - t.Fatalf("Nineth Event was %v; want %v", ctx.upd.events[8].Type, structs.TaskKilling) + t.Fatalf("Ninth Event was %v; want %v", ctx.upd.events[8].Type, structs.TaskKilling) } if ctx.upd.events[9].Type != structs.TaskKilled { diff --git a/command/agent/config.go b/command/agent/config.go index 4940719b0..4c07e3436 100644 --- a/command/agent/config.go +++ b/command/agent/config.go @@ -374,7 +374,7 @@ type Telemetry struct { // check, it will *NOT* be activated. This setting overrides that behavior. // Default: "false" CirconusCheckForceMetricActivation string `mapstructure:"circonus_check_force_metric_activation"` - // CirconusCheckInstanceID serves to uniquely identify the metrics comming from this "instance". + // CirconusCheckInstanceID serves to uniquely identify the metrics coming from this "instance". // It can be used to maintain metric continuity with transient or ephemeral instances as // they move around within an infrastructure. // Default: hostname:app @@ -441,7 +441,7 @@ type Resources struct { } // ParseReserved expands the ReservedPorts string into a slice of port numbers. -// The supported syntax is comma seperated integers or ranges seperated by +// The supported syntax is comma separated integers or ranges separated by // hyphens. 
For example, "80,120-150,160" func (r *Resources) ParseReserved() error { parts := strings.Split(r.ReservedPorts, ",") diff --git a/command/agent/consul/client.go b/command/agent/consul/client.go index 0900c82b3..bd51f2338 100644 --- a/command/agent/consul/client.go +++ b/command/agent/consul/client.go @@ -516,7 +516,7 @@ func (c *ServiceClient) checkRegs(ops *operations, allocID, serviceID string, se return nil } -// RegisterTask with Consul. Adds all sevice entries and checks to Consul. If +// RegisterTask with Consul. Adds all service entries and checks to Consul. If // exec is nil and a script check exists an error is returned. // // If the service IP is set it used as the address in the service registration. @@ -555,7 +555,7 @@ func (c *ServiceClient) UpdateTask(allocID string, existing, newTask *structs.Ta for existingID, existingSvc := range existingIDs { newSvc, ok := newIDs[existingID] if !ok { - // Existing sevice entry removed + // Existing service entry removed ops.deregServices = append(ops.deregServices, existingID) for _, check := range existingSvc.Checks { ops.deregChecks = append(ops.deregChecks, makeCheckID(existingID, check)) diff --git a/command/agent/consul/unit_test.go b/command/agent/consul/unit_test.go index c3841b672..330087a42 100644 --- a/command/agent/consul/unit_test.go +++ b/command/agent/consul/unit_test.go @@ -442,7 +442,7 @@ func TestConsul_ChangePorts(t *testing.T) { t.Errorf("expected Port y=%v but found: %v", expected, v.HTTP) } default: - t.Errorf("Unkown check: %q", k) + t.Errorf("Unknown check: %q", k) } } } @@ -542,7 +542,7 @@ func TestConsul_ChangeChecks(t *testing.T) { t.Errorf("expected Port x=%v but found: %v", expected, v.HTTP) } default: - t.Errorf("Unkown check: %q", k) + t.Errorf("Unknown check: %q", k) } } } diff --git a/command/agent/fs_endpoint.go b/command/agent/fs_endpoint.go index ac775d422..dc7b0fb6b 100644 --- a/command/agent/fs_endpoint.go +++ b/command/agent/fs_endpoint.go @@ -423,7 +423,7 @@ func (s 
*StreamFramer) readData() []byte { // Send creates and sends a StreamFrame based on the passed parameters. An error // is returned if the run routine hasn't run or encountered an error. Send is -// asyncronous and does not block for the data to be transferred. +// asynchronous and does not block for the data to be transferred. func (s *StreamFramer) Send(file, fileEvent string, data []byte, offset int64) error { s.l.Lock() defer s.l.Unlock() diff --git a/command/agent/fs_endpoint_test.go b/command/agent/fs_endpoint_test.go index ba461ef6d..2368f27fe 100644 --- a/command/agent/fs_endpoint_test.go +++ b/command/agent/fs_endpoint_test.go @@ -1201,7 +1201,7 @@ func TestLogs_findClosest(t *testing.T) { Error: true, }, - // Test begining cases + // Test beginning cases { Entries: entries, DesiredIdx: 0, diff --git a/command/check.go b/command/check.go index 8a13fe4a7..c37bf858a 100644 --- a/command/check.go +++ b/command/check.go @@ -35,7 +35,7 @@ Agent Check Options: Minimum number of peers that a server is expected to know. -min-servers - Minumum number of servers that a client is expected to know. + Minimum number of servers that a client is expected to know. ` return strings.TrimSpace(helpText) diff --git a/command/fs.go b/command/fs.go index f383e5610..b9faadd57 100644 --- a/command/fs.go +++ b/command/fs.go @@ -21,7 +21,7 @@ const ( bytesToLines int64 = 120 // defaultTailLines is the number of lines to tail by default if the value - // is not overriden. + // is not overridden. defaultTailLines int64 = 10 ) diff --git a/command/job_dispatch.go b/command/job_dispatch.go index 13b180ea9..52f96b42e 100644 --- a/command/job_dispatch.go +++ b/command/job_dispatch.go @@ -18,7 +18,7 @@ func (c *JobDispatchCommand) Help() string { Usage: nomad job dispatch [options] [input source] Dispatch creates an instance of a parameterized job. 
A data payload to the -dispatched instance can be provided via stdin by using "-" or by specifiying a +dispatched instance can be provided via stdin by using "-" or by specifying a path to a file. Metadata can be supplied by using the meta flag one or more times. @@ -33,9 +33,9 @@ General Options: Dispatch Options: -meta = - Meta takes a key/value pair seperated by "=". The metadata key will be + Meta takes a key/value pair separated by "=". The metadata key will be merged into the job's metadata. The job may define a default value for the - key which is overriden when dispatching. The flag can be provided more than + key which is overridden when dispatching. The flag can be provided more than once to inject multiple metadata key/value pairs. Arbitrary keys are not allowed. The parameterized job must allow the key to be merged. diff --git a/command/job_history.go b/command/job_history.go index a4b401b20..d3712ba14 100644 --- a/command/job_history.go +++ b/command/job_history.go @@ -21,7 +21,7 @@ Usage: nomad job history [options] History is used to display the known versions of a particular job. The command can display the diff between job versions and can be useful for understanding -the changes that occured to the job as well as deciding job versions to revert +the changes that occurred to the job as well as deciding job versions to revert to. 
General Options: diff --git a/nomad/blocked_evals_test.go b/nomad/blocked_evals_test.go index c26988994..54dc094d8 100644 --- a/nomad/blocked_evals_test.go +++ b/nomad/blocked_evals_test.go @@ -296,7 +296,7 @@ func TestBlockedEvals_Reblock(t *testing.T) { t.Fatalf("bad: %#v", brokerStats) } - // Ack the evaluation which should cause the reblocked eval to transistion + // Ack the evaluation which should cause the reblocked eval to transition // to ready if err := broker.Ack(e.ID, token); err != nil { t.Fatalf("err: %v", err) diff --git a/nomad/deployment_watcher_shims.go b/nomad/deployment_watcher_shims.go index 9dc4bb842..2640720d2 100644 --- a/nomad/deployment_watcher_shims.go +++ b/nomad/deployment_watcher_shims.go @@ -90,8 +90,8 @@ type deploymentWatcherRaftShim struct { } // convertApplyErrors parses the results of a raftApply and returns the index at -// which it was applied and any error that occured. Raft Apply returns two -// seperate errors, Raft library errors and user returned errors from the FSM. +// which it was applied and any error that occurred. Raft Apply returns two +// separate errors, Raft library errors and user returned errors from the FSM. // This helper, joins the errors by inspecting the applyResponse for an error. 
func (d *deploymentWatcherRaftShim) convertApplyErrors(applyResp interface{}, index uint64, err error) (uint64, error) { if applyResp != nil { diff --git a/nomad/deploymentwatcher/deployment_watcher.go b/nomad/deploymentwatcher/deployment_watcher.go index 1eb188d53..f4f4c9674 100644 --- a/nomad/deploymentwatcher/deployment_watcher.go +++ b/nomad/deploymentwatcher/deployment_watcher.go @@ -203,7 +203,7 @@ func (w *deploymentWatcher) PromoteDeployment( func (w *deploymentWatcher) PauseDeployment( req *structs.DeploymentPauseRequest, resp *structs.DeploymentUpdateResponse) error { - // Determine the status we should transistion to and if we need to create an + // Determine the status we should transition to and if we need to create an // evaluation status, desc := structs.DeploymentStatusPaused, structs.DeploymentStatusDescriptionPaused var eval *structs.Evaluation diff --git a/nomad/deploymentwatcher/deployments_watcher.go b/nomad/deploymentwatcher/deployments_watcher.go index 1822405df..0deaf34d9 100644 --- a/nomad/deploymentwatcher/deployments_watcher.go +++ b/nomad/deploymentwatcher/deployments_watcher.go @@ -18,7 +18,7 @@ const ( LimitStateQueriesPerSecond = 100.0 // CrossDeploymentEvalBatchDuration is the duration in which evaluations are - // batched across all deployment watchers before commiting to Raft. + // batched across all deployment watchers before committing to Raft. 
CrossDeploymentEvalBatchDuration = 250 * time.Millisecond ) diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index 47a0dea15..d92d8629e 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -250,7 +250,7 @@ func TestJobEndpoint_Register_Existing(t *testing.T) { t.Fatalf("bad index: %d", resp.Index) } - // Check to ensure the job version didn't get bumped becasue we submitted + // Check to ensure the job version didn't get bumped because we submitted // the same job state = s1.fsm.State() ws = memdb.NewWatchSet() @@ -902,7 +902,7 @@ func TestJobEndpoint_Stable(t *testing.T) { t.Fatalf("bad index: %d", resp.Index) } - // Create stablility request + // Create stability request stableReq := &structs.JobStabilityRequest{ JobID: job.ID, JobVersion: 0, diff --git a/nomad/node_endpoint.go b/nomad/node_endpoint.go index 3b8273563..e9b4f55ce 100644 --- a/nomad/node_endpoint.go +++ b/nomad/node_endpoint.go @@ -97,7 +97,7 @@ func (n *Node) Register(args *structs.NodeRegisterRequest, reply *structs.NodeUp return fmt.Errorf("failed to computed node class: %v", err) } - // Look for the node so we can detect a state transistion + // Look for the node so we can detect a state transition snap, err := n.srv.fsm.State().Snapshot() if err != nil { return err diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index d9a0ceed5..da4af13f4 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -420,7 +420,7 @@ func TestClientEndpoint_Register_GetEvals(t *testing.T) { t.Fatalf("index mis-match") } - // Transistion it to down and then ready + // Transition it to down and then ready node.Status = structs.NodeStatusDown reg = &structs.NodeRegisterRequest{ Node: node, diff --git a/nomad/server.go b/nomad/server.go index a18f69772..3327c44db 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -130,7 +130,7 @@ type Server struct { blockedEvals *BlockedEvals // deploymentWatcher is used to watch deployments and 
their allocations and - // make the required calls to continue to transistion the deployment. + // make the required calls to continue to transition the deployment. deploymentWatcher *deploymentwatcher.Watcher // evalBroker is used to manage the in-progress evaluations @@ -1122,7 +1122,7 @@ func (s *Server) Stats() map[string]map[string]string { return stats } -// Region retuns the region of the server +// Region returns the region of the server func (s *Server) Region() string { return s.config.Region } diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index fbef0e8df..760a35232 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -563,7 +563,7 @@ func (s *StateStore) UpsertJob(index uint64, job *structs.Job) error { return nil } -// upsertJobImpl is the inplementation for registering a job or updating a job definition +// upsertJobImpl is the implementation for registering a job or updating a job definition func (s *StateStore) upsertJobImpl(index uint64, job *structs.Job, keepVersion bool, txn *memdb.Txn) error { // Check if the job already exists existing, err := txn.First("jobs", "id", job.ID) @@ -2369,7 +2369,7 @@ func (s *StateStore) setJobStatus(index uint64, txn *memdb.Txn, pSummary.Children = new(structs.JobChildrenSummary) } - // Determine the transistion and update the correct fields + // Determine the transition and update the correct fields children := pSummary.Children // Decrement old status diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 3abafca9d..df75ef67f 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -4728,11 +4728,11 @@ func TestJobSummary_UpdateClientStatus(t *testing.T) { } } -// Test that non-existant deployment can't be updated -func TestStateStore_UpsertDeploymentStatusUpdate_NonExistant(t *testing.T) { +// Test that non-existent deployment can't be updated +func TestStateStore_UpsertDeploymentStatusUpdate_NonExistent(t 
*testing.T) { state := testStateStore(t) - // Update the non-existant deployment + // Update the non-existent deployment req := &structs.DeploymentStatusUpdateRequest{ DeploymentUpdate: &structs.DeploymentStatusUpdate{ DeploymentID: structs.GenerateUUID(), @@ -4938,11 +4938,11 @@ func TestStateStore_UpdateJobStability(t *testing.T) { } } -// Test that non-existant deployment can't be promoted -func TestStateStore_UpsertDeploymentPromotion_NonExistant(t *testing.T) { +// Test that non-existent deployment can't be promoted +func TestStateStore_UpsertDeploymentPromotion_NonExistent(t *testing.T) { state := testStateStore(t) - // Promote the non-existant deployment + // Promote the non-existent deployment req := &structs.ApplyDeploymentPromoteRequest{ DeploymentPromoteRequest: structs.DeploymentPromoteRequest{ DeploymentID: structs.GenerateUUID(), @@ -5059,7 +5059,7 @@ func TestStateStore_UpsertDeploymentPromotion_NoCanaries(t *testing.T) { t.Fatalf("bad: %v", err) } if !strings.Contains(err.Error(), "no canaries to promote") { - t.Fatalf("expect error promoting non-existant canaries: %v", err) + t.Fatalf("expect error promoting non-existent canaries: %v", err) } } @@ -5255,11 +5255,11 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { } } -// Test that allocation health can't be set against a non-existant deployment -func TestStateStore_UpsertDeploymentAllocHealth_NonExistant(t *testing.T) { +// Test that allocation health can't be set against a non-existent deployment +func TestStateStore_UpsertDeploymentAllocHealth_NonExistent(t *testing.T) { state := testStateStore(t) - // Set health against the non-existant deployment + // Set health against the non-existent deployment req := &structs.ApplyDeploymentAllocHealthRequest{ DeploymentAllocHealthRequest: structs.DeploymentAllocHealthRequest{ DeploymentID: structs.GenerateUUID(), @@ -5297,8 +5297,8 @@ func TestStateStore_UpsertDeploymentAllocHealth_Terminal(t *testing.T) { } } -// Test that 
allocation health can't be set against a non-existant alloc -func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_NonExistant(t *testing.T) { +// Test that allocation health can't be set against a non-existent alloc +func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_NonExistent(t *testing.T) { state := testStateStore(t) // Insert a deployment diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 7f40183c5..5d336532c 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -523,7 +523,7 @@ type DeriveVaultTokenResponse struct { // Tasks is a mapping between the task name and the wrapped token Tasks map[string]string - // Error stores any error that occured. Errors are stored here so we can + // Error stores any error that occurred. Errors are stored here so we can // communicate whether it is retriable Error *RecoverableError @@ -688,7 +688,7 @@ type JobValidateResponse struct { // ValidationErrors is a list of validation errors ValidationErrors []string - // Error is a string version of any error that may have occured + // Error is a string version of any error that may have occurred Error string // Warnings contains any warnings about the given job. These may include @@ -3619,7 +3619,7 @@ func NewTaskEvent(event string) *TaskEvent { } } -// SetSetupError is used to store an error that occured while setting up the +// SetSetupError is used to store an error that occurred while setting up the // task func (e *TaskEvent) SetSetupError(err error) *TaskEvent { if err != nil { @@ -3999,7 +3999,7 @@ const ( VaultChangeModeRestart = "restart" ) -// Vault stores the set of premissions a task needs access to from Vault. +// Vault stores the set of permissions a task needs access to from Vault. 
type Vault struct { // Policies is the set of policies that the task needs access to Policies []string @@ -4054,7 +4054,7 @@ func (v *Vault) Validate() error { for _, p := range v.Policies { if p == "root" { - multierror.Append(&mErr, fmt.Errorf("Can not specifiy \"root\" policy")) + multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy")) } } @@ -4098,7 +4098,7 @@ func DeploymentStatusDescriptionRollback(baseDescription string, jobVersion uint } // Deployment is the object that represents a job deployment which is used to -// transistion a job between versions. +// transition a job between versions. type Deployment struct { // ID is a generated UUID for the deployment ID string @@ -4959,7 +4959,7 @@ func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool, escaped } // CreateFailedFollowUpEval creates a follow up evaluation when the current one -// has been marked as failed becasue it has hit the delivery limit and will not +// has been marked as failed because it has hit the delivery limit and will not // be retried by the eval_broker. func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation { return &Evaluation{ @@ -5093,7 +5093,7 @@ type PlanResult struct { // Deployment is the deployment that was committed. Deployment *Deployment - // DeploymentUpdates is the set of deployment updates that were commited. + // DeploymentUpdates is the set of deployment updates that were committed. DeploymentUpdates []*DeploymentStatusUpdate // RefreshIndex is the index the worker should refresh state up to. 
diff --git a/nomad/vault.go b/nomad/vault.go index 0d14ebe5c..28b91111d 100644 --- a/nomad/vault.go +++ b/nomad/vault.go @@ -562,7 +562,7 @@ func (v *vaultClient) renew() error { } v.lastRenewed = time.Now() - v.logger.Printf("[DEBUG] vault: succesfully renewed server token") + v.logger.Printf("[DEBUG] vault: successfully renewed server token") return nil } @@ -628,7 +628,7 @@ func (v *vaultClient) parseSelfToken() error { // 2) Must have update capability for "auth/token/lookup/" (used to verify incoming tokens) // 3) Must have update capability for "/auth/token/revoke-accessor/" (used to revoke unneeded tokens) // 4) If configured to create tokens against a role: - // a) Must have read capability for "auth/token/roles/" // c) Role must: // 1) Not allow orphans @@ -754,7 +754,7 @@ func (v *vaultClient) validateCapabilities(role string, root bool) error { // hasCapability takes a path and returns whether the token has at least one of // the required capabilities on the given path. It also returns the set of -// capabilities the token does have as well as any error that occured. +// capabilities the token does have as well as any error that occurred. func (v *vaultClient) hasCapability(path string, required []string) (bool, []string, error) { caps, err := v.client.Sys().CapabilitiesSelf(path) if err != nil { diff --git a/nomad/vault_test.go b/nomad/vault_test.go index cdf87146d..cb69caf5a 100644 --- a/nomad/vault_test.go +++ b/nomad/vault_test.go @@ -171,7 +171,7 @@ func TestVaultClient_BadConfig(t *testing.T) { } } -// started seperately. +// started separately. // Test that the Vault Client can establish a connection even if it is started // before Vault is available. 
func TestVaultClient_EstablishConnection(t *testing.T) { diff --git a/nomad/worker.go b/nomad/worker.go index 6a274bf12..38e0a2f4a 100644 --- a/nomad/worker.go +++ b/nomad/worker.go @@ -450,7 +450,7 @@ func (w *Worker) ReblockEval(eval *structs.Evaluation) error { ws := memdb.NewWatchSet() summary, err := w.srv.fsm.state.JobSummaryByID(ws, eval.JobID) if err != nil { - return fmt.Errorf("couldn't retreive job summary: %v", err) + return fmt.Errorf("couldn't retrieve job summary: %v", err) } if summary != nil { var hasChanged bool diff --git a/scheduler/reconcile.go b/scheduler/reconcile.go index 23313e5cf..dae2bb715 100644 --- a/scheduler/reconcile.go +++ b/scheduler/reconcile.go @@ -37,7 +37,7 @@ type allocReconciler struct { job *structs.Job // jobID is the ID of the job being operated on. The job may be nil if it is - // being stopped so we require this seperately. + // being stopped so we require this separately. jobID string // oldDeployment is the last deployment for the job @@ -579,7 +579,7 @@ func (a *allocReconciler) computeLimit(group *structs.TaskGroup, untainted, dest } // computePlacement returns the set of allocations to place given the group -// definiton, the set of untainted and migrating allocations for the group. +// definition, the set of untainted and migrating allocations for the group. func (a *allocReconciler) computePlacements(group *structs.TaskGroup, nameIndex *allocNameIndex, untainted, migrate allocSet) []allocPlaceResult { @@ -601,7 +601,7 @@ func (a *allocReconciler) computePlacements(group *structs.TaskGroup, } // computeStop returns the set of allocations that are marked for stopping given -// the group definiton, the set of allocations in various states and whether we +// the group definition, the set of allocations in various states and whether we // are canarying. 
func (a *allocReconciler) computeStop(group *structs.TaskGroup, nameIndex *allocNameIndex, untainted, migrate, lost, canaries allocSet, canaryState bool) allocSet { diff --git a/scheduler/reconcile_test.go b/scheduler/reconcile_test.go index c9a61164e..282cdfade 100644 --- a/scheduler/reconcile_test.go +++ b/scheduler/reconcile_test.go @@ -3005,7 +3005,7 @@ func TestReconciler_JobChange_ScaleUp_SecondEval(t *testing.T) { deploymentUpdates: nil, desiredTGUpdates: map[string]*structs.DesiredUpdates{ job.TaskGroups[0].Name: { - // All should be ignored becasue nothing has been marked as + // All should be ignored because nothing has been marked as // healthy. Ignore: 30, }, diff --git a/scheduler/reconcile_util.go b/scheduler/reconcile_util.go index 75c2994b5..bf60723a1 100644 --- a/scheduler/reconcile_util.go +++ b/scheduler/reconcile_util.go @@ -264,7 +264,7 @@ func newAllocNameIndex(job, taskGroup string, count int, in allocSet) *allocName // bitmapFrom creates a bitmap from the given allocation set and a minimum size // maybe given. The size of the bitmap is as the larger of the passed minimum -// and t the maximum alloc index of the passed input (byte alligned). +// and t the maximum alloc index of the passed input (byte aligned). 
func bitmapFrom(input allocSet, minSize uint) structs.Bitmap { var max uint for _, a := range input { @@ -376,7 +376,7 @@ func (a *allocNameIndex) NextCanaries(n uint, existing, destructive allocSet) [] } } - // We have exhausted the prefered and free set, now just pick overlapping + // We have exhausted the preferred and free set, now just pick overlapping // indexes var i uint for i = 0; i < remainder; i++ { diff --git a/scheduler/system_sched_test.go b/scheduler/system_sched_test.go index 36713a2f6..b411fd030 100644 --- a/scheduler/system_sched_test.go +++ b/scheduler/system_sched_test.go @@ -1188,7 +1188,7 @@ func TestSystemSched_RetryLimit(t *testing.T) { // This test ensures that the scheduler doesn't increment the queued allocation // count for a task group when allocations can't be created on currently -// availabe nodes because of constrain mismatches. +// available nodes because of constraint mismatches. func TestSystemSched_Queued_With_Constraints(t *testing.T) { h := NewHarness(t)