From a9e3a414071d3c629bc8d1a11730a88b3fbff4d5 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Tue, 26 Sep 2017 15:26:33 -0700 Subject: [PATCH] Enable more linters --- GNUmakefile | 21 +++- acl/policy.go | 2 +- acl/policy_test.go | 10 +- api/agent_test.go | 72 ++++++------ api/allocations_test.go | 12 +- api/compose_test.go | 14 +-- api/evaluations_test.go | 12 +- api/jobs_test.go | 25 +++-- api/jobs_testing.go | 8 +- api/nodes.go | 2 +- api/nodes_test.go | 12 +- api/operator.go | 2 +- api/tasks_test.go | 14 +-- client/alloc_runner.go | 10 -- client/alloc_runner_test.go | 12 ++ client/allocdir/alloc_dir_test.go | 5 - client/allocdir/fs_windows.go | 11 -- client/client.go | 33 ++++-- client/client_test.go | 2 +- client/consul_template.go | 6 - client/driver/docker.go | 71 ++++++------ client/driver/docker_default.go | 2 +- client/driver/docker_test.go | 38 +++---- client/driver/docker_windows.go | 2 +- client/driver/driver.go | 12 +- client/driver/driver_test.go | 29 +---- client/driver/env/env_test.go | 2 +- client/driver/exec.go | 10 +- client/driver/exec_linux.go | 6 + client/driver/executor/executor.go | 2 - client/driver/executor/executor_test.go | 14 --- client/driver/java.go | 10 +- client/driver/logging/rotator.go | 2 +- client/driver/logging/syslog_parser.go | 2 +- .../driver/logging/syslog_server_unix_test.go | 2 +- client/driver/logging/universal_collector.go | 4 +- client/driver/lxc.go | 28 ++--- client/driver/qemu.go | 8 +- client/driver/qemu_test.go | 4 +- client/driver/raw_exec.go | 4 +- client/driver/rkt.go | 24 ++-- client/driver/rkt_test.go | 4 +- client/driver/utils_unix.go | 8 -- client/driver/utils_windows.go | 4 - client/fingerprint/env_aws.go | 7 +- client/fingerprint/env_gce.go | 29 +++-- client/fingerprint/vault.go | 2 +- client/restarts.go | 6 - client/state_database.go | 11 -- client/stats/cpu.go | 1 - client/stats/host.go | 1 - client/task_runner.go | 25 ++++- client/task_runner_test.go | 10 +- client/util_test.go | 2 +- client/vaultclient/vaultclient.go | 14 --- command/agent/agent.go | 14 +-- command/agent/agent_endpoint_test.go | 9 +- command/agent/config_parse_test.go | 4 +- command/agent/config_test.go | 2 +- command/agent/consul/client.go | 4 - command/agent/consul/int_test.go | 4 + command/agent/fs_endpoint.go | 2 +- command/agent/fs_endpoint_test.go | 4 +- command/agent/job_endpoint_test.go | 4 +- command/agent/log_writer.go | 2 +- command/alloc_status.go | 2 - command/eval_status.go | 2 +- command/meta.go | 2 +- command/monitor.go | 16 --- command/monitor_test.go | 76 +------------ command/namespace_delete.go | 2 +- command/node_drain.go | 2 +- command/node_status.go | 4 +- command/plan.go | 2 - command/run.go | 19 ---- demo/digitalocean/app/bench.go | 2 +- helper/fields/data_test.go | 16 +-- helper/flatmap/flatmap.go | 14 +-- helper/flatmap/flatmap_test.go | 8 +- helper/funcs.go | 2 +- helper/funcs_test.go | 8 +- jobspec/parse_test.go | 54 ++++----- main.go | 2 +- nomad/acl_endpoint.go | 2 +- nomad/acl_testutil_test.go | 2 +- nomad/fsm_test.go | 12 +- nomad/job_endpoint_test.go | 9 +- nomad/mock/mock.go | 28 ++--- nomad/node_endpoint_test.go | 4 +- nomad/operator_endpoint_test.go | 2 +- nomad/periodic.go | 2 +- nomad/periodic_test.go | 40 +++---- nomad/plan_apply.go | 3 +- nomad/plan_apply_pool_test.go | 2 +- nomad/plan_apply_test.go | 36 +++--- nomad/serf.go | 2 +- nomad/server.go | 6 +- nomad/state/notify.go | 2 +- nomad/state/schema.go | 56 +++++----- nomad/state/state_store.go | 2 +- nomad/state/state_store_test.go | 36 +++--- 
nomad/structs/funcs_test.go | 30 ++--- nomad/structs/network_test.go | 42 +++---- nomad/structs/node_class_test.go | 2 +- nomad/structs/structs.go | 2 +- nomad/structs/structs_test.go | 98 ++++++++--------- nomad/system_endpoint_test.go | 2 +- nomad/vault.go | 8 -- nomad/vault_test.go | 8 +- nomad/worker_test.go | 10 +- scheduler/annotate_test.go | 2 +- scheduler/context_test.go | 6 +- scheduler/feasible_test.go | 104 +++++++++--------- scheduler/generic_sched_test.go | 4 +- scheduler/rank_test.go | 32 +++--- scheduler/reconcile_util.go | 17 +-- scheduler/reconcile_util_test.go | 2 +- scheduler/select_test.go | 12 +- scheduler/util.go | 24 +--- scheduler/util_test.go | 100 ++++++++--------- 120 files changed, 745 insertions(+), 973 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index 555d1a04f..5adc9720e 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -7,9 +7,6 @@ GIT_DIRTY := $(if $(shell git status --porcelain),+CHANGES) GO_LDFLAGS := "-X main.GitCommit=$(GIT_COMMIT)$(GIT_DIRTY)" GO_TAGS = -# Enable additional linters as the codebase evolves to pass them -CHECKS ?= --enable goimports - default: help ifeq (,$(findstring $(THIS_OS),Darwin Linux FreeBSD)) @@ -160,11 +157,23 @@ check: ## Lint the source code @gometalinter \ --deadline 10m \ --vendor \ - --exclude '(.*\.generated\.go:\d+:|bindata_assetfs)' \ + --exclude='.*\.generated\.go' \ + --exclude='.*bindata_assetfs\.go' \ --skip="ui/" \ + --sort="path" \ + --aggregate \ + --enable-gc \ --disable-all \ - --sort severity \ - $(CHECKS) \ + --enable goimports \ + --enable misspell \ + --enable vet \ + --enable deadcode \ + --enable varcheck \ + --enable ineffassign \ + --enable structcheck \ + --enable unconvert \ + --enable gas \ + --enable gofmt \ ./... .PHONY: checkscripts diff --git a/acl/policy.go b/acl/policy.go index 757fe2fde..aac949822 100644 --- a/acl/policy.go +++ b/acl/policy.go @@ -78,7 +78,7 @@ func isNamespaceCapabilityValid(cap string) bool { case NamespaceCapabilityDeny, NamespaceCapabilityListJobs, NamespaceCapabilityReadJob, NamespaceCapabilitySubmitJob, NamespaceCapabilityReadLogs, NamespaceCapabilityReadFS: return true - // Seperate the enterprise-only capabilities + // Separate the enterprise-only capabilities case NamespaceCapabilitySentinelOverride: return true default: diff --git a/acl/policy_test.go b/acl/policy_test.go index 9c553cee7..0900547ad 100644 --- a/acl/policy_test.go +++ b/acl/policy_test.go @@ -24,7 +24,7 @@ func TestParse(t *testing.T) { "", &Policy{ Namespaces: []*NamespacePolicy{ - &NamespacePolicy{ + { Name: "default", Policy: PolicyRead, Capabilities: []string{ @@ -59,7 +59,7 @@ func TestParse(t *testing.T) { "", &Policy{ Namespaces: []*NamespacePolicy{ - &NamespacePolicy{ + { Name: "default", Policy: PolicyRead, Capabilities: []string{ @@ -67,7 +67,7 @@ func TestParse(t *testing.T) { NamespaceCapabilityReadJob, }, }, - &NamespacePolicy{ + { Name: "other", Policy: PolicyWrite, Capabilities: []string{ @@ -78,7 +78,7 @@ func TestParse(t *testing.T) { NamespaceCapabilityReadFS, }, }, - &NamespacePolicy{ + { Name: "secret", Capabilities: []string{ NamespaceCapabilityDeny, @@ -160,7 +160,7 @@ func TestParse(t *testing.T) { "", &Policy{ Namespaces: []*NamespacePolicy{ - &NamespacePolicy{ + { Name: "default", Policy: "", Capabilities: []string{ diff --git a/api/agent_test.go b/api/agent_test.go index 82ea3b5a1..8e8f91217 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -142,98 +142,98 @@ func TestAgents_Sort(t *testing.T) { }{ { []*AgentMember{ - &AgentMember{Name: "nomad-2.vac.us-east", + 
{Name: "nomad-2.vac.us-east", Tags: map[string]string{"region": "us-east", "dc": "us-east-1c"}}, - &AgentMember{Name: "nomad-1.global", + {Name: "nomad-1.global", Tags: map[string]string{"region": "global", "dc": "dc1"}}, - &AgentMember{Name: "nomad-1.vac.us-east", + {Name: "nomad-1.vac.us-east", Tags: map[string]string{"region": "us-east", "dc": "us-east-1c"}}, }, []*AgentMember{ - &AgentMember{Name: "nomad-1.global", + {Name: "nomad-1.global", Tags: map[string]string{"region": "global", "dc": "dc1"}}, - &AgentMember{Name: "nomad-1.vac.us-east", + {Name: "nomad-1.vac.us-east", Tags: map[string]string{"region": "us-east", "dc": "us-east-1c"}}, - &AgentMember{Name: "nomad-2.vac.us-east", + {Name: "nomad-2.vac.us-east", Tags: map[string]string{"region": "us-east", "dc": "us-east-1c"}}, }, }, { []*AgentMember{ - &AgentMember{Name: "nomad-02.tam.us-east", + {Name: "nomad-02.tam.us-east", Tags: map[string]string{"region": "us-east", "dc": "tampa"}}, - &AgentMember{Name: "nomad-02.pal.us-west", + {Name: "nomad-02.pal.us-west", Tags: map[string]string{"region": "us-west", "dc": "palo_alto"}}, - &AgentMember{Name: "nomad-01.pal.us-west", + {Name: "nomad-01.pal.us-west", Tags: map[string]string{"region": "us-west", "dc": "palo_alto"}}, - &AgentMember{Name: "nomad-01.tam.us-east", + {Name: "nomad-01.tam.us-east", Tags: map[string]string{"region": "us-east", "dc": "tampa"}}, }, []*AgentMember{ - &AgentMember{Name: "nomad-01.tam.us-east", + {Name: "nomad-01.tam.us-east", Tags: map[string]string{"region": "us-east", "dc": "tampa"}}, - &AgentMember{Name: "nomad-02.tam.us-east", + {Name: "nomad-02.tam.us-east", Tags: map[string]string{"region": "us-east", "dc": "tampa"}}, - &AgentMember{Name: "nomad-01.pal.us-west", + {Name: "nomad-01.pal.us-west", Tags: map[string]string{"region": "us-west", "dc": "palo_alto"}}, - &AgentMember{Name: "nomad-02.pal.us-west", + {Name: "nomad-02.pal.us-west", Tags: map[string]string{"region": "us-west", "dc": "palo_alto"}}, }, }, { []*AgentMember{ - &AgentMember{Name: "nomad-02.tam.us-east", + {Name: "nomad-02.tam.us-east", Tags: map[string]string{"region": "us-east", "dc": "tampa"}}, - &AgentMember{Name: "nomad-02.ams.europe", + {Name: "nomad-02.ams.europe", Tags: map[string]string{"region": "europe", "dc": "amsterdam"}}, - &AgentMember{Name: "nomad-01.tam.us-east", + {Name: "nomad-01.tam.us-east", Tags: map[string]string{"region": "us-east", "dc": "tampa"}}, - &AgentMember{Name: "nomad-01.ams.europe", + {Name: "nomad-01.ams.europe", Tags: map[string]string{"region": "europe", "dc": "amsterdam"}}, }, []*AgentMember{ - &AgentMember{Name: "nomad-01.ams.europe", + {Name: "nomad-01.ams.europe", Tags: map[string]string{"region": "europe", "dc": "amsterdam"}}, - &AgentMember{Name: "nomad-02.ams.europe", + {Name: "nomad-02.ams.europe", Tags: map[string]string{"region": "europe", "dc": "amsterdam"}}, - &AgentMember{Name: "nomad-01.tam.us-east", + {Name: "nomad-01.tam.us-east", Tags: map[string]string{"region": "us-east", "dc": "tampa"}}, - &AgentMember{Name: "nomad-02.tam.us-east", + {Name: "nomad-02.tam.us-east", Tags: map[string]string{"region": "us-east", "dc": "tampa"}}, }, }, { []*AgentMember{ - &AgentMember{Name: "nomad-02.ber.europe", + {Name: "nomad-02.ber.europe", Tags: map[string]string{"region": "europe", "dc": "berlin"}}, - &AgentMember{Name: "nomad-02.ams.europe", + {Name: "nomad-02.ams.europe", Tags: map[string]string{"region": "europe", "dc": "amsterdam"}}, - &AgentMember{Name: "nomad-01.ams.europe", + {Name: "nomad-01.ams.europe", Tags: map[string]string{"region": 
"europe", "dc": "amsterdam"}}, - &AgentMember{Name: "nomad-01.ber.europe", + {Name: "nomad-01.ber.europe", Tags: map[string]string{"region": "europe", "dc": "berlin"}}, }, []*AgentMember{ - &AgentMember{Name: "nomad-01.ams.europe", + {Name: "nomad-01.ams.europe", Tags: map[string]string{"region": "europe", "dc": "amsterdam"}}, - &AgentMember{Name: "nomad-02.ams.europe", + {Name: "nomad-02.ams.europe", Tags: map[string]string{"region": "europe", "dc": "amsterdam"}}, - &AgentMember{Name: "nomad-01.ber.europe", + {Name: "nomad-01.ber.europe", Tags: map[string]string{"region": "europe", "dc": "berlin"}}, - &AgentMember{Name: "nomad-02.ber.europe", + {Name: "nomad-02.ber.europe", Tags: map[string]string{"region": "europe", "dc": "berlin"}}, }, }, { []*AgentMember{ - &AgentMember{Name: "nomad-1.global"}, - &AgentMember{Name: "nomad-3.global"}, - &AgentMember{Name: "nomad-2.global"}, + {Name: "nomad-1.global"}, + {Name: "nomad-3.global"}, + {Name: "nomad-2.global"}, }, []*AgentMember{ - &AgentMember{Name: "nomad-1.global"}, - &AgentMember{Name: "nomad-2.global"}, - &AgentMember{Name: "nomad-3.global"}, + {Name: "nomad-1.global"}, + {Name: "nomad-2.global"}, + {Name: "nomad-3.global"}, }, }, } diff --git a/api/allocations_test.go b/api/allocations_test.go index ba0ab9663..63c67a050 100644 --- a/api/allocations_test.go +++ b/api/allocations_test.go @@ -104,16 +104,16 @@ func TestAllocations_PrefixList(t *testing.T) { func TestAllocations_CreateIndexSort(t *testing.T) { t.Parallel() allocs := []*AllocationListStub{ - &AllocationListStub{CreateIndex: 2}, - &AllocationListStub{CreateIndex: 1}, - &AllocationListStub{CreateIndex: 5}, + {CreateIndex: 2}, + {CreateIndex: 1}, + {CreateIndex: 5}, } sort.Sort(AllocIndexSort(allocs)) expect := []*AllocationListStub{ - &AllocationListStub{CreateIndex: 5}, - &AllocationListStub{CreateIndex: 2}, - &AllocationListStub{CreateIndex: 1}, + {CreateIndex: 5}, + {CreateIndex: 2}, + {CreateIndex: 1}, } if !reflect.DeepEqual(allocs, expect) { t.Fatalf("\n\n%#v\n\n%#v", allocs, expect) diff --git a/api/compose_test.go b/api/compose_test.go index 40dd55090..737497071 100644 --- a/api/compose_test.go +++ b/api/compose_test.go @@ -20,7 +20,7 @@ func TestCompose(t *testing.T) { DiskMB: helper.IntToPtr(2048), IOPS: helper.IntToPtr(500), Networks: []*NetworkResource{ - &NetworkResource{ + { CIDR: "0.0.0.0/0", MBits: helper.IntToPtr(100), ReservedPorts: []Port{{"", 80}, {"", 443}}, @@ -55,25 +55,25 @@ func TestCompose(t *testing.T) { "foo": "bar", }, Constraints: []*Constraint{ - &Constraint{ + { LTarget: "kernel.name", RTarget: "linux", Operand: "=", }, }, TaskGroups: []*TaskGroup{ - &TaskGroup{ + { Name: helper.StringToPtr("grp1"), Count: helper.IntToPtr(2), Constraints: []*Constraint{ - &Constraint{ + { LTarget: "kernel.name", RTarget: "linux", Operand: "=", }, }, Tasks: []*Task{ - &Task{ + { Name: "task1", Driver: "exec", Resources: &Resources{ @@ -82,7 +82,7 @@ func TestCompose(t *testing.T) { DiskMB: helper.IntToPtr(2048), IOPS: helper.IntToPtr(500), Networks: []*NetworkResource{ - &NetworkResource{ + { CIDR: "0.0.0.0/0", MBits: helper.IntToPtr(100), ReservedPorts: []Port{ @@ -93,7 +93,7 @@ func TestCompose(t *testing.T) { }, }, Constraints: []*Constraint{ - &Constraint{ + { LTarget: "kernel.name", RTarget: "linux", Operand: "=", diff --git a/api/evaluations_test.go b/api/evaluations_test.go index 0ec17be3c..83eb6c91b 100644 --- a/api/evaluations_test.go +++ b/api/evaluations_test.go @@ -145,16 +145,16 @@ func TestEvaluations_Allocations(t *testing.T) { func 
TestEvaluations_Sort(t *testing.T) { t.Parallel() evals := []*Evaluation{ - &Evaluation{CreateIndex: 2}, - &Evaluation{CreateIndex: 1}, - &Evaluation{CreateIndex: 5}, + {CreateIndex: 2}, + {CreateIndex: 1}, + {CreateIndex: 5}, } sort.Sort(EvalIndexSort(evals)) expect := []*Evaluation{ - &Evaluation{CreateIndex: 5}, - &Evaluation{CreateIndex: 2}, - &Evaluation{CreateIndex: 1}, + {CreateIndex: 5}, + {CreateIndex: 2}, + {CreateIndex: 1}, } if !reflect.DeepEqual(evals, expect) { t.Fatalf("\n\n%#v\n\n%#v", evals, expect) diff --git a/api/jobs_test.go b/api/jobs_test.go index 7bb80a5e3..b09fde60e 100644 --- a/api/jobs_test.go +++ b/api/jobs_test.go @@ -622,13 +622,13 @@ func TestJobs_EnforceRegister(t *testing.T) { // Create a job and attempt to register it with an incorrect index. job := testJob() - resp2, wm, err := jobs.EnforceRegister(job, 10, nil) + resp2, _, err := jobs.EnforceRegister(job, 10, nil) if err == nil || !strings.Contains(err.Error(), RegisterEnforceIndexErrPrefix) { t.Fatalf("expected enforcement error: %v", err) } // Register - resp2, wm, err = jobs.EnforceRegister(job, 0, nil) + resp2, wm, err := jobs.EnforceRegister(job, 0, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -655,7 +655,7 @@ func TestJobs_EnforceRegister(t *testing.T) { curIndex := resp[0].JobModifyIndex // Fail at incorrect index - resp2, wm, err = jobs.EnforceRegister(job, 123456, nil) + resp2, _, err = jobs.EnforceRegister(job, 123456, nil) if err == nil || !strings.Contains(err.Error(), RegisterEnforceIndexErrPrefix) { t.Fatalf("expected enforcement error: %v", err) } @@ -699,7 +699,7 @@ func TestJobs_Revert(t *testing.T) { assertWriteMeta(t, wm) // Fail revert at incorrect enforce - _, wm, err = jobs.Revert(*job.ID, 0, helper.Uint64ToPtr(10), nil) + _, _, err = jobs.Revert(*job.ID, 0, helper.Uint64ToPtr(10), nil) if err == nil || !strings.Contains(err.Error(), "enforcing version") { t.Fatalf("expected enforcement error: %v", err) } @@ -1127,6 +1127,7 @@ func TestJobs_Plan(t *testing.T) { if len(planResp.CreatedEvals) == 0 { t.Fatalf("got no CreatedEvals: %#v", planResp) } + assertWriteMeta(t, wm) // Make a plan request w/o the diff planResp, wm, err = jobs.Plan(job, false, nil) @@ -1263,12 +1264,12 @@ func TestJobs_Constrain(t *testing.T) { // Adding another constraint preserves the original job.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000")) expect := []*Constraint{ - &Constraint{ + { LTarget: "kernel.name", RTarget: "darwin", Operand: "=", }, - &Constraint{ + { LTarget: "memory.totalbytes", RTarget: "128000000", Operand: ">=", @@ -1282,16 +1283,16 @@ func TestJobs_Constrain(t *testing.T) { func TestJobs_Sort(t *testing.T) { t.Parallel() jobs := []*JobListStub{ - &JobListStub{ID: "job2"}, - &JobListStub{ID: "job0"}, - &JobListStub{ID: "job1"}, + {ID: "job2"}, + {ID: "job0"}, + {ID: "job1"}, } sort.Sort(JobIDSort(jobs)) expect := []*JobListStub{ - &JobListStub{ID: "job0"}, - &JobListStub{ID: "job1"}, - &JobListStub{ID: "job2"}, + {ID: "job0"}, + {ID: "job1"}, + {ID: "job2"}, } if !reflect.DeepEqual(jobs, expect) { t.Fatalf("\n\n%#v\n\n%#v", jobs, expect) diff --git a/api/jobs_testing.go b/api/jobs_testing.go index bed9ac474..ecea68c70 100644 --- a/api/jobs_testing.go +++ b/api/jobs_testing.go @@ -17,14 +17,14 @@ func MockJob() *Job { AllAtOnce: helper.BoolToPtr(false), Datacenters: []string{"dc1"}, Constraints: []*Constraint{ - &Constraint{ + { LTarget: "${attr.kernel.name}", RTarget: "linux", Operand: "=", }, }, TaskGroups: []*TaskGroup{ - &TaskGroup{ + { Name: 
helper.StringToPtr("web"), Count: helper.IntToPtr(10), EphemeralDisk: &EphemeralDisk{ @@ -37,7 +37,7 @@ func MockJob() *Job { Mode: helper.StringToPtr("delay"), }, Tasks: []*Task{ - &Task{ + { Name: "web", Driver: "exec", Config: map[string]interface{}{ @@ -72,7 +72,7 @@ func MockJob() *Job { CPU: helper.IntToPtr(500), MemoryMB: helper.IntToPtr(256), Networks: []*NetworkResource{ - &NetworkResource{ + { MBits: helper.IntToPtr(50), DynamicPorts: []Port{{Label: "http"}, {Label: "admin"}}, }, diff --git a/api/nodes.go b/api/nodes.go index 50a159628..e1ef5e2aa 100644 --- a/api/nodes.go +++ b/api/nodes.go @@ -22,7 +22,7 @@ func (n *Nodes) List(q *QueryOptions) ([]*NodeListStub, *QueryMeta, error) { if err != nil { return nil, nil, err } - sort.Sort(NodeIndexSort(resp)) + sort.Sort(resp) return resp, qm, nil } diff --git a/api/nodes_test.go b/api/nodes_test.go index 562da6324..b3cc6c2b1 100644 --- a/api/nodes_test.go +++ b/api/nodes_test.go @@ -260,16 +260,16 @@ func TestNodes_ForceEvaluate(t *testing.T) { func TestNodes_Sort(t *testing.T) { t.Parallel() nodes := []*NodeListStub{ - &NodeListStub{CreateIndex: 2}, - &NodeListStub{CreateIndex: 1}, - &NodeListStub{CreateIndex: 5}, + {CreateIndex: 2}, + {CreateIndex: 1}, + {CreateIndex: 5}, } sort.Sort(NodeIndexSort(nodes)) expect := []*NodeListStub{ - &NodeListStub{CreateIndex: 5}, - &NodeListStub{CreateIndex: 2}, - &NodeListStub{CreateIndex: 1}, + {CreateIndex: 5}, + {CreateIndex: 2}, + {CreateIndex: 1}, } if !reflect.DeepEqual(nodes, expect) { t.Fatalf("\n\n%#v\n\n%#v", nodes, expect) diff --git a/api/operator.go b/api/operator.go index a10648a29..a83d54cb3 100644 --- a/api/operator.go +++ b/api/operator.go @@ -75,7 +75,7 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err // TODO (alexdadgar) Currently we made address a query parameter. Once // IDs are in place this will be DELETE /v1/operator/raft/peer/. 
- r.params.Set("address", string(address)) + r.params.Set("address", address) _, resp, err := requireOK(op.c.doRequest(r)) if err != nil { diff --git a/api/tasks_test.go b/api/tasks_test.go index cc64bc93e..d870eab27 100644 --- a/api/tasks_test.go +++ b/api/tasks_test.go @@ -38,12 +38,12 @@ func TestTaskGroup_Constrain(t *testing.T) { // Add a second constraint grp.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000")) expect := []*Constraint{ - &Constraint{ + { LTarget: "kernel.name", RTarget: "darwin", Operand: "=", }, - &Constraint{ + { LTarget: "memory.totalbytes", RTarget: "128000000", Operand: ">=", @@ -95,11 +95,11 @@ func TestTaskGroup_AddTask(t *testing.T) { // Add a second task grp.AddTask(NewTask("task2", "exec")) expect := []*Task{ - &Task{ + { Name: "task1", Driver: "java", }, - &Task{ + { Name: "task2", Driver: "exec", }, @@ -178,7 +178,7 @@ func TestTask_Require(t *testing.T) { DiskMB: helper.IntToPtr(2048), IOPS: helper.IntToPtr(500), Networks: []*NetworkResource{ - &NetworkResource{ + { CIDR: "0.0.0.0/0", MBits: helper.IntToPtr(100), ReservedPorts: []Port{{"", 80}, {"", 443}}, @@ -214,12 +214,12 @@ func TestTask_Constrain(t *testing.T) { // Add a second constraint task.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000")) expect := []*Constraint{ - &Constraint{ + { LTarget: "kernel.name", RTarget: "darwin", Operand: "=", }, - &Constraint{ + { LTarget: "memory.totalbytes", RTarget: "128000000", Operand: ">=", diff --git a/client/alloc_runner.go b/client/alloc_runner.go index c4725c6e4..72aefe4b7 100644 --- a/client/alloc_runner.go +++ b/client/alloc_runner.go @@ -20,16 +20,6 @@ import ( cstructs "github.com/hashicorp/nomad/client/structs" ) -const ( - // taskReceivedSyncLimit is how long the client will wait before sending - // that a task was received to the server. The client does not immediately - // send that the task was received to the server because another transition - // to running or failed is likely to occur immediately after and a single - // update will transfer all past state information. If not other transition - // has occurred up to this limit, we will send to the server. - taskReceivedSyncLimit = 30 * time.Second -) - var ( // The following are the key paths written to the state database allocRunnerStateAllocKey = []byte("alloc") diff --git a/client/alloc_runner_test.go b/client/alloc_runner_test.go index ffd1de0e4..1730f0772 100644 --- a/client/alloc_runner_test.go +++ b/client/alloc_runner_test.go @@ -49,6 +49,18 @@ func (m *MockAllocStateUpdater) Last() (int, *structs.Allocation) { return n, m.Allocs[n-1].Copy() } +// allocationBucketExists checks if the allocation bucket was created. 
+func allocationBucketExists(tx *bolt.Tx, allocID string) bool { + allocations := tx.Bucket(allocationsBucket) + if allocations == nil { + return false + } + + // Retrieve the specific allocations bucket + alloc := allocations.Bucket([]byte(allocID)) + return alloc != nil +} + func testAllocRunnerFromAlloc(alloc *structs.Allocation, restarts bool) (*MockAllocStateUpdater, *AllocRunner) { logger := testLogger() conf := config.DefaultConfig() diff --git a/client/allocdir/alloc_dir_test.go b/client/allocdir/alloc_dir_test.go index aa870d1b7..a89ac3948 100644 --- a/client/allocdir/alloc_dir_test.go +++ b/client/allocdir/alloc_dir_test.go @@ -21,11 +21,6 @@ import ( ) var ( - osMountSharedDirSupport = map[string]bool{ - "darwin": true, - "linux": true, - } - t1 = &structs.Task{ Name: "web", Driver: "exec", diff --git a/client/allocdir/fs_windows.go b/client/allocdir/fs_windows.go index 3667c1953..845bd7767 100644 --- a/client/allocdir/fs_windows.go +++ b/client/allocdir/fs_windows.go @@ -1,7 +1,6 @@ package allocdir import ( - "errors" "os" "path/filepath" ) @@ -25,11 +24,6 @@ func linkOrCopy(src, dst string, uid, gid int, perm os.FileMode) error { return fileCopy(src, dst, uid, gid, perm) } -// The windows version does nothing currently. -func mountSharedDir(dir string) error { - return errors.New("Mount on Windows not supported.") -} - // The windows version does nothing currently. func linkDir(src, dst string) error { return nil @@ -55,11 +49,6 @@ func dropDirPermissions(path string, desired os.FileMode) error { return nil } -// The windows version does nothing currently. -func unmountSharedDir(dir string) error { - return nil -} - // MountSpecialDirs mounts the dev and proc file system on the chroot of the // task. It's a no-op on windows. func MountSpecialDirs(taskDir string) error { diff --git a/client/client.go b/client/client.go index 680fbd9af..3aec48be5 100644 --- a/client/client.go +++ b/client/client.go @@ -62,7 +62,7 @@ const ( stateSnapshotIntv = 60 * time.Second // initialHeartbeatStagger is used to stagger the interval between - // starting and the intial heartbeat. After the intial heartbeat, + // starting and the initial heartbeat. After the initial heartbeat, // we switch to using the TTL specified by the servers. 
initialHeartbeatStagger = 10 * time.Second @@ -120,7 +120,7 @@ type Client struct { triggerDiscoveryCh chan struct{} // discovered will be ticked whenever Consul discovery completes - // succesfully + // successfully serversDiscoveredCh chan struct{} // allocs is the current set of allocations @@ -473,7 +473,7 @@ func (c *Client) Stats() map[string]map[string]string { c.heartbeatLock.Lock() defer c.heartbeatLock.Unlock() stats := map[string]map[string]string{ - "client": map[string]string{ + "client": { "node_id": c.NodeID(), "known_servers": c.servers.all().String(), "num_allocations": strconv.Itoa(c.NumAllocs()), @@ -1650,10 +1650,9 @@ func (c *Client) deriveToken(alloc *structs.Allocation, taskNames []string, vcli } verifiedTasks := []string{} - found := false // Check if the given task names actually exist in the allocation for _, taskName := range taskNames { - found = false + found := false for _, task := range group.Tasks { if task.Name == taskName { found = true @@ -1903,7 +1902,10 @@ func (c *Client) setGaugeForMemoryStats(nodeID string, hStats *stats.HostStats) func (c *Client) setGaugeForCPUStats(nodeID string, hStats *stats.HostStats) { for _, cpu := range hStats.CPU { if !c.config.DisableTaggedMetrics { - labels := append(c.baseLabels, metrics.Label{"cpu", cpu.CPU}) + labels := append(c.baseLabels, metrics.Label{ + Name: "cpu", + Value: cpu.CPU, + }) metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", "total"}, float32(cpu.Total), labels) metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", "user"}, float32(cpu.User), labels) @@ -1924,7 +1926,10 @@ func (c *Client) setGaugeForCPUStats(nodeID string, hStats *stats.HostStats) { func (c *Client) setGaugeForDiskStats(nodeID string, hStats *stats.HostStats) { for _, disk := range hStats.DiskStats { if !c.config.DisableTaggedMetrics { - labels := append(c.baseLabels, metrics.Label{"disk", disk.Device}) + labels := append(c.baseLabels, metrics.Label{ + Name: "disk", + Value: disk.Device, + }) metrics.SetGaugeWithLabels([]string{"client", "host", "disk", "size"}, float32(disk.Size), labels) metrics.SetGaugeWithLabels([]string{"client", "host", "disk", "used"}, float32(disk.Used), labels) @@ -1969,7 +1974,10 @@ func (c *Client) setGaugeForAllocationStats(nodeID string) { for _, n := range allocated.Networks { if !c.config.DisableTaggedMetrics { - labels := append(c.baseLabels, metrics.Label{"device", n.Device}) + labels := append(c.baseLabels, metrics.Label{ + Name: "device", + Value: n.Device, + }) metrics.SetGaugeWithLabels([]string{"client", "allocated", "network"}, float32(n.MBits), labels) } @@ -1999,18 +2007,19 @@ func (c *Client) setGaugeForAllocationStats(nodeID string) { } for _, n := range allocated.Networks { - totalMbits := 0 - totalIdx := total.NetIndex(n) if totalIdx != -1 { - totalMbits = total.Networks[totalIdx].MBits continue } + totalMbits := total.Networks[totalIdx].MBits unallocatedMbits := totalMbits - n.MBits if !c.config.DisableTaggedMetrics { - labels := append(c.baseLabels, metrics.Label{"device", n.Device}) + labels := append(c.baseLabels, metrics.Label{ + Name: "device", + Value: n.Device, + }) metrics.SetGaugeWithLabels([]string{"client", "unallocated", "network"}, float32(unallocatedMbits), labels) } diff --git a/client/client_test.go b/client/client_test.go index 756823263..dce58395f 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -137,7 +137,7 @@ func TestClient_StartStop(t *testing.T) { } } -// Certain labels for metrics are dependant on client intial setup. 
This tests +// Certain labels for metrics are dependent on client initial setup. This tests // that the client has properly initialized before we assign values to labels func TestClient_BaseLabels(t *testing.T) { t.Parallel() diff --git a/client/consul_template.go b/client/consul_template.go index 2f7a62935..52dd718e5 100644 --- a/client/consul_template.go +++ b/client/consul_template.go @@ -40,12 +40,6 @@ const ( DefaultMaxTemplateEventRate = 3 * time.Second ) -var ( - // testRetryRate is used to speed up tests by setting consul-templates retry - // rate to something low - testRetryRate time.Duration = 0 -) - // TaskHooks is an interface which provides hooks into the tasks life-cycle type TaskHooks interface { // Restart is used to restart the task diff --git a/client/driver/docker.go b/client/driver/docker.go index 894cfafc3..01c89838f 100644 --- a/client/driver/docker.go +++ b/client/driver/docker.go @@ -349,7 +349,6 @@ type DockerHandle struct { ImageID string containerID string version string - clkSpeed float64 killTimeout time.Duration maxKillTimeout time.Duration resourceUsageLock sync.RWMutex @@ -427,108 +426,108 @@ func (d *DockerDriver) Validate(config map[string]interface{}) error { fd := &fields.FieldData{ Raw: config, Schema: map[string]*fields.FieldSchema{ - "image": &fields.FieldSchema{ + "image": { Type: fields.TypeString, Required: true, }, - "load": &fields.FieldSchema{ + "load": { Type: fields.TypeString, }, - "command": &fields.FieldSchema{ + "command": { Type: fields.TypeString, }, - "args": &fields.FieldSchema{ + "args": { Type: fields.TypeArray, }, - "ipc_mode": &fields.FieldSchema{ + "ipc_mode": { Type: fields.TypeString, }, - "network_mode": &fields.FieldSchema{ + "network_mode": { Type: fields.TypeString, }, - "network_aliases": &fields.FieldSchema{ + "network_aliases": { Type: fields.TypeArray, }, - "ipv4_address": &fields.FieldSchema{ + "ipv4_address": { Type: fields.TypeString, }, - "ipv6_address": &fields.FieldSchema{ + "ipv6_address": { Type: fields.TypeString, }, - "mac_address": &fields.FieldSchema{ + "mac_address": { Type: fields.TypeString, }, - "pid_mode": &fields.FieldSchema{ + "pid_mode": { Type: fields.TypeString, }, - "uts_mode": &fields.FieldSchema{ + "uts_mode": { Type: fields.TypeString, }, - "userns_mode": &fields.FieldSchema{ + "userns_mode": { Type: fields.TypeString, }, - "port_map": &fields.FieldSchema{ + "port_map": { Type: fields.TypeArray, }, - "privileged": &fields.FieldSchema{ + "privileged": { Type: fields.TypeBool, }, - "dns_servers": &fields.FieldSchema{ + "dns_servers": { Type: fields.TypeArray, }, - "dns_options": &fields.FieldSchema{ + "dns_options": { Type: fields.TypeArray, }, - "dns_search_domains": &fields.FieldSchema{ + "dns_search_domains": { Type: fields.TypeArray, }, - "extra_hosts": &fields.FieldSchema{ + "extra_hosts": { Type: fields.TypeArray, }, - "hostname": &fields.FieldSchema{ + "hostname": { Type: fields.TypeString, }, - "labels": &fields.FieldSchema{ + "labels": { Type: fields.TypeArray, }, - "auth": &fields.FieldSchema{ + "auth": { Type: fields.TypeArray, }, - "auth_soft_fail": &fields.FieldSchema{ + "auth_soft_fail": { Type: fields.TypeBool, }, // COMPAT: Remove in 0.6.0.
SSL is no longer needed - "ssl": &fields.FieldSchema{ + "ssl": { Type: fields.TypeBool, }, - "tty": &fields.FieldSchema{ + "tty": { Type: fields.TypeBool, }, - "interactive": &fields.FieldSchema{ + "interactive": { Type: fields.TypeBool, }, - "shm_size": &fields.FieldSchema{ + "shm_size": { Type: fields.TypeInt, }, - "work_dir": &fields.FieldSchema{ + "work_dir": { Type: fields.TypeString, }, - "logging": &fields.FieldSchema{ + "logging": { Type: fields.TypeArray, }, - "volumes": &fields.FieldSchema{ + "volumes": { Type: fields.TypeArray, }, - "volume_driver": &fields.FieldSchema{ + "volume_driver": { Type: fields.TypeString, }, "mounts": { Type: fields.TypeArray, }, - "force_pull": &fields.FieldSchema{ + "force_pull": { Type: fields.TypeBool, }, - "security_opt": &fields.FieldSchema{ + "security_opt": { Type: fields.TypeArray, }, }, @@ -1153,7 +1152,7 @@ func (d *DockerDriver) createContainerConfig(ctx *ExecContext, task *structs.Tas if len(driverConfig.NetworkAliases) > 0 || driverConfig.IPv4Address != "" || driverConfig.IPv6Address != "" { networkingConfig = &docker.NetworkingConfig{ EndpointsConfig: map[string]*docker.EndpointConfig{ - hostConfig.NetworkMode: &docker.EndpointConfig{}, + hostConfig.NetworkMode: {}, }, } } @@ -1414,7 +1413,7 @@ func (d *DockerDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, er // Look for a running container with this ID containers, err := client.ListContainers(docker.ListContainersOptions{ Filters: map[string][]string{ - "id": []string{pid.ContainerID}, + "id": {pid.ContainerID}, }, }) if err != nil { diff --git a/client/driver/docker_default.go b/client/driver/docker_default.go index ee600b141..d8d4bb53a 100644 --- a/client/driver/docker_default.go +++ b/client/driver/docker_default.go @@ -10,5 +10,5 @@ const ( ) func getPortBinding(ip string, port string) []docker.PortBinding { - return []docker.PortBinding{docker.PortBinding{HostIP: ip, HostPort: port}} + return []docker.PortBinding{{HostIP: ip, HostPort: port}} } diff --git a/client/driver/docker_test.go b/client/driver/docker_test.go index 3a966f62a..c7f561051 100644 --- a/client/driver/docker_test.go +++ b/client/driver/docker_test.go @@ -68,7 +68,7 @@ func dockerTask() (*structs.Task, int, int) { MemoryMB: 256, CPU: 512, Networks: []*structs.NetworkResource{ - &structs.NetworkResource{ + { IP: "127.0.0.1", ReservedPorts: []structs.Port{{Label: "main", Value: docker_reserved}}, DynamicPorts: []structs.Port{{Label: "REDIS", Value: docker_dynamic}}, @@ -772,7 +772,7 @@ func TestDockerDriver_Labels(t *testing.T) { } task, _, _ := dockerTask() task.Config["labels"] = []map[string]string{ - map[string]string{ + { "label1": "value1", "label2": "value2", }, @@ -955,10 +955,10 @@ func TestDockerDriver_PortsNoMap(t *testing.T) { // Verify that the correct ports are EXPOSED expectedExposedPorts := map[docker.Port]struct{}{ - docker.Port(fmt.Sprintf("%d/tcp", res)): struct{}{}, - docker.Port(fmt.Sprintf("%d/udp", res)): struct{}{}, - docker.Port(fmt.Sprintf("%d/tcp", dyn)): struct{}{}, - docker.Port(fmt.Sprintf("%d/udp", dyn)): struct{}{}, + docker.Port(fmt.Sprintf("%d/tcp", res)): {}, + docker.Port(fmt.Sprintf("%d/udp", res)): {}, + docker.Port(fmt.Sprintf("%d/tcp", dyn)): {}, + docker.Port(fmt.Sprintf("%d/udp", dyn)): {}, } if !reflect.DeepEqual(container.Config.ExposedPorts, expectedExposedPorts) { @@ -967,10 +967,10 @@ func TestDockerDriver_PortsNoMap(t *testing.T) { // Verify that the correct ports are FORWARDED expectedPortBindings := map[docker.Port][]docker.PortBinding{ - 
docker.Port(fmt.Sprintf("%d/tcp", res)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, - docker.Port(fmt.Sprintf("%d/udp", res)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, - docker.Port(fmt.Sprintf("%d/tcp", dyn)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, - docker.Port(fmt.Sprintf("%d/udp", dyn)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, + docker.Port(fmt.Sprintf("%d/tcp", res)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, + docker.Port(fmt.Sprintf("%d/udp", res)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, + docker.Port(fmt.Sprintf("%d/tcp", dyn)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, + docker.Port(fmt.Sprintf("%d/udp", dyn)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, } if !reflect.DeepEqual(container.HostConfig.PortBindings, expectedPortBindings) { @@ -996,7 +996,7 @@ func TestDockerDriver_PortsMapping(t *testing.T) { } task, res, dyn := dockerTask() task.Config["port_map"] = []map[string]string{ - map[string]string{ + { "main": "8080", "REDIS": "6379", }, @@ -1014,10 +1014,10 @@ func TestDockerDriver_PortsMapping(t *testing.T) { // Verify that the correct ports are EXPOSED expectedExposedPorts := map[docker.Port]struct{}{ - docker.Port("8080/tcp"): struct{}{}, - docker.Port("8080/udp"): struct{}{}, - docker.Port("6379/tcp"): struct{}{}, - docker.Port("6379/udp"): struct{}{}, + docker.Port("8080/tcp"): {}, + docker.Port("8080/udp"): {}, + docker.Port("6379/tcp"): {}, + docker.Port("6379/udp"): {}, } if !reflect.DeepEqual(container.Config.ExposedPorts, expectedExposedPorts) { @@ -1026,10 +1026,10 @@ func TestDockerDriver_PortsMapping(t *testing.T) { // Verify that the correct ports are FORWARDED expectedPortBindings := map[docker.Port][]docker.PortBinding{ - docker.Port("8080/tcp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, - docker.Port("8080/udp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, - docker.Port("6379/tcp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, - docker.Port("6379/udp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, + docker.Port("8080/tcp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, + docker.Port("8080/udp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, + docker.Port("6379/tcp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, + docker.Port("6379/udp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, } if !reflect.DeepEqual(container.HostConfig.PortBindings, expectedPortBindings) { diff --git a/client/driver/docker_windows.go b/client/driver/docker_windows.go index 92ba2ff6b..8b16d17d5 100644 --- a/client/driver/docker_windows.go +++ b/client/driver/docker_windows.go @@ -9,5 +9,5 @@ const ( //Currently Windows containers don't support host ip in port binding. 
func getPortBinding(ip string, port string) []docker.PortBinding { - return []docker.PortBinding{docker.PortBinding{HostIP: "", HostPort: port}} + return []docker.PortBinding{{HostIP: "", HostPort: port}} } diff --git a/client/driver/driver.go b/client/driver/driver.go index 5a762fb94..47bc38af6 100644 --- a/client/driver/driver.go +++ b/client/driver/driver.go @@ -88,7 +88,7 @@ func NewCreatedResources() *CreatedResources { // Add a new resource if it doesn't already exist. func (r *CreatedResources) Add(k, v string) { if r.Resources == nil { - r.Resources = map[string][]string{k: []string{v}} + r.Resources = map[string][]string{k: {v}} return } existing, ok := r.Resources[k] @@ -340,16 +340,6 @@ func NewExecContext(td *allocdir.TaskDir, te *env.TaskEnv) *ExecContext { } } -func mapMergeStrInt(maps ...map[string]int) map[string]int { - out := map[string]int{} - for _, in := range maps { - for key, val := range in { - out[key] = val - } - } - return out -} - func mapMergeStrStr(maps ...map[string]string) map[string]string { out := map[string]string{} for _, in := range maps { diff --git a/client/driver/driver_test.go b/client/driver/driver_test.go index 60a2bea40..a230f43e0 100644 --- a/client/driver/driver_test.go +++ b/client/driver/driver_test.go @@ -23,7 +23,7 @@ var basicResources = &structs.Resources{ MemoryMB: 256, DiskMB: 20, Networks: []*structs.NetworkResource{ - &structs.NetworkResource{ + { IP: "0.0.0.0", ReservedPorts: []structs.Port{{Label: "main", Value: 12345}}, DynamicPorts: []structs.Port{{Label: "HTTP", Value: 43330}}, @@ -140,7 +140,7 @@ func setupTaskEnv(t *testing.T, driver string) (*allocdir.TaskDir, map[string]st CPU: 1000, MemoryMB: 500, Networks: []*structs.NetworkResource{ - &structs.NetworkResource{ + { IP: "1.2.3.4", ReservedPorts: []structs.Port{{Label: "one", Value: 80}, {Label: "two", Value: 443}}, DynamicPorts: []structs.Port{{Label: "admin", Value: 8081}, {Label: "web", Value: 8086}}, @@ -304,31 +304,6 @@ func TestDriver_TaskEnv_Image(t *testing.T) { } } -func TestMapMergeStrInt(t *testing.T) { - t.Parallel() - a := map[string]int{ - "cakes": 5, - "cookies": 3, - } - - b := map[string]int{ - "cakes": 3, - "pies": 2, - } - - c := mapMergeStrInt(a, b) - - d := map[string]int{ - "cakes": 3, - "cookies": 3, - "pies": 2, - } - - if !reflect.DeepEqual(c, d) { - t.Errorf("\nExpected\n%+v\nGot\n%+v\n", d, c) - } -} - func TestMapMergeStrStr(t *testing.T) { t.Parallel() a := map[string]string{ diff --git a/client/driver/env/env_test.go b/client/driver/env/env_test.go index 1f288997d..6ea4e72e6 100644 --- a/client/driver/env/env_test.go +++ b/client/driver/env/env_test.go @@ -159,7 +159,7 @@ func TestEnvironment_AsList(t *testing.T) { "taskEnvKey": "taskEnvVal", } task.Resources.Networks = []*structs.NetworkResource{ - &structs.NetworkResource{ + { IP: "127.0.0.1", ReservedPorts: []structs.Port{{Label: "http", Value: 80}}, DynamicPorts: []structs.Port{{Label: "https", Value: 8080}}, diff --git a/client/driver/exec.go b/client/driver/exec.go index 751c2081e..9b0772847 100644 --- a/client/driver/exec.go +++ b/client/driver/exec.go @@ -20,12 +20,6 @@ import ( "github.com/mitchellh/mapstructure" ) -const ( - // The key populated in Node Attributes to indicate the presence of the Exec - // driver - execDriverAttr = "driver.exec" -) - // ExecDriver fork/execs tasks using as many of the underlying OS's isolation // features. 
type ExecDriver struct { @@ -66,11 +60,11 @@ func (d *ExecDriver) Validate(config map[string]interface{}) error { fd := &fields.FieldData{ Raw: config, Schema: map[string]*fields.FieldSchema{ - "command": &fields.FieldSchema{ + "command": { Type: fields.TypeString, Required: true, }, - "args": &fields.FieldSchema{ + "args": { Type: fields.TypeArray, }, }, diff --git a/client/driver/exec_linux.go b/client/driver/exec_linux.go index 958b60b4e..ab3203a49 100644 --- a/client/driver/exec_linux.go +++ b/client/driver/exec_linux.go @@ -7,6 +7,12 @@ import ( "golang.org/x/sys/unix" ) +const ( + // The key populated in Node Attributes to indicate the presence of the Exec + // driver + execDriverAttr = "driver.exec" +) + func (d *ExecDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { // Only enable if cgroups are available and we are root if !cgroupsMounted(node) { diff --git a/client/driver/executor/executor.go b/client/driver/executor/executor.go index 744f65df0..4153a9778 100644 --- a/client/driver/executor/executor.go +++ b/client/driver/executor/executor.go @@ -163,8 +163,6 @@ type UniversalExecutor struct { lro *logging.FileRotator rotatorLock sync.Mutex - shutdownCh chan struct{} - syslogServer *logging.SyslogServer syslogChan chan *logging.SyslogMessage diff --git a/client/driver/executor/executor_test.go b/client/driver/executor/executor_test.go index b4594635a..fb9026678 100644 --- a/client/driver/executor/executor_test.go +++ b/client/driver/executor/executor_test.go @@ -14,24 +14,10 @@ import ( "github.com/hashicorp/nomad/client/driver/env" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/nomad/mock" - "github.com/hashicorp/nomad/nomad/structs" tu "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/go-ps" ) -var ( - constraint = &structs.Resources{ - CPU: 250, - MemoryMB: 256, - Networks: []*structs.NetworkResource{ - &structs.NetworkResource{ - MBits: 50, - DynamicPorts: []structs.Port{{Label: "http"}}, - }, - }, - } -) - func testLogger() *log.Logger { return log.New(os.Stderr, "", log.LstdFlags) } diff --git a/client/driver/java.go b/client/driver/java.go index 601ed5263..580eb7e55 100644 --- a/client/driver/java.go +++ b/client/driver/java.go @@ -80,19 +80,19 @@ func (d *JavaDriver) Validate(config map[string]interface{}) error { fd := &fields.FieldData{ Raw: config, Schema: map[string]*fields.FieldSchema{ - "class": &fields.FieldSchema{ + "class": { Type: fields.TypeString, }, - "class_path": &fields.FieldSchema{ + "class_path": { Type: fields.TypeString, }, - "jar_path": &fields.FieldSchema{ + "jar_path": { Type: fields.TypeString, }, - "jvm_options": &fields.FieldSchema{ + "jvm_options": { Type: fields.TypeArray, }, - "args": &fields.FieldSchema{ + "args": { Type: fields.TypeArray, }, }, diff --git a/client/driver/logging/rotator.go b/client/driver/logging/rotator.go index 7494ad09c..bd3ed86a1 100644 --- a/client/driver/logging/rotator.go +++ b/client/driver/logging/rotator.go @@ -190,7 +190,7 @@ func (f *FileRotator) createFile() error { // flushPeriodically flushes the buffered writer every 100ms to the underlying // file func (f *FileRotator) flushPeriodically() { - for _ = range f.flushTicker.C { + for range f.flushTicker.C { f.flushBuffer() } } diff --git a/client/driver/logging/syslog_parser.go b/client/driver/logging/syslog_parser.go index f65c510f0..b12e97e0b 100644 --- a/client/driver/logging/syslog_parser.go +++ b/client/driver/logging/syslog_parser.go @@ -93,7 +93,7 @@ func (d *DockerLogParser) 
logContentIndex(line []byte) int { } } } - // then the colon is what seperates it, followed by a space + // then the colon is what separates it, followed by a space for i := cursor; i < len(line); i++ { if line[i] == ':' && i+1 < len(line) && line[i+1] == ' ' { cursor = i + 1 diff --git a/client/driver/logging/syslog_server_unix_test.go b/client/driver/logging/syslog_server_unix_test.go index e031bf941..2540d9100 100644 --- a/client/driver/logging/syslog_server_unix_test.go +++ b/client/driver/logging/syslog_server_unix_test.go @@ -33,7 +33,7 @@ func TestSyslogServer_Start_Shutdown(t *testing.T) { received := false go func() { - for _ = range s.messages { + for range s.messages { received = true } }() diff --git a/client/driver/logging/universal_collector.go b/client/driver/logging/universal_collector.go index ab6885c87..7cb5da136 100644 --- a/client/driver/logging/universal_collector.go +++ b/client/driver/logging/universal_collector.go @@ -56,9 +56,7 @@ type LogCollector interface { // SyslogCollector is a LogCollector which starts a syslog server and does // rotation to incoming stream type SyslogCollector struct { - addr net.Addr - logConfig *structs.LogConfig - ctx *LogCollectorContext + ctx *LogCollectorContext lro *FileRotator lre *FileRotator diff --git a/client/driver/lxc.go b/client/driver/lxc.go index 9513c74a2..36c6e0e99 100644 --- a/client/driver/lxc.go +++ b/client/driver/lxc.go @@ -81,59 +81,59 @@ func (d *LxcDriver) Validate(config map[string]interface{}) error { fd := &fields.FieldData{ Raw: config, Schema: map[string]*fields.FieldSchema{ - "template": &fields.FieldSchema{ + "template": { Type: fields.TypeString, Required: true, }, - "distro": &fields.FieldSchema{ + "distro": { Type: fields.TypeString, Required: false, }, - "release": &fields.FieldSchema{ + "release": { Type: fields.TypeString, Required: false, }, - "arch": &fields.FieldSchema{ + "arch": { Type: fields.TypeString, Required: false, }, - "image_variant": &fields.FieldSchema{ + "image_variant": { Type: fields.TypeString, Required: false, }, - "image_server": &fields.FieldSchema{ + "image_server": { Type: fields.TypeString, Required: false, }, - "gpg_key_id": &fields.FieldSchema{ + "gpg_key_id": { Type: fields.TypeString, Required: false, }, - "gpg_key_server": &fields.FieldSchema{ + "gpg_key_server": { Type: fields.TypeString, Required: false, }, - "disable_gpg": &fields.FieldSchema{ + "disable_gpg": { Type: fields.TypeString, Required: false, }, - "flush_cache": &fields.FieldSchema{ + "flush_cache": { Type: fields.TypeString, Required: false, }, - "force_cache": &fields.FieldSchema{ + "force_cache": { Type: fields.TypeString, Required: false, }, - "template_args": &fields.FieldSchema{ + "template_args": { Type: fields.TypeArray, Required: false, }, - "log_level": &fields.FieldSchema{ + "log_level": { Type: fields.TypeString, Required: false, }, - "verbosity": &fields.FieldSchema{ + "verbosity": { Type: fields.TypeString, Required: false, }, diff --git a/client/driver/qemu.go b/client/driver/qemu.go index 8e744940d..ae35e4d39 100644 --- a/client/driver/qemu.go +++ b/client/driver/qemu.go @@ -74,17 +74,17 @@ func (d *QemuDriver) Validate(config map[string]interface{}) error { fd := &fields.FieldData{ Raw: config, Schema: map[string]*fields.FieldSchema{ - "image_path": &fields.FieldSchema{ + "image_path": { Type: fields.TypeString, Required: true, }, - "accelerator": &fields.FieldSchema{ + "accelerator": { Type: fields.TypeString, }, - "port_map": &fields.FieldSchema{ + "port_map": { Type: fields.TypeArray, }, 
- "args": &fields.FieldSchema{ + "args": { Type: fields.TypeArray, }, }, diff --git a/client/driver/qemu_test.go b/client/driver/qemu_test.go index 0aedb5ddf..0bdb976ea 100644 --- a/client/driver/qemu_test.go +++ b/client/driver/qemu_test.go @@ -72,7 +72,7 @@ func TestQemuDriver_StartOpen_Wait(t *testing.T) { CPU: 500, MemoryMB: 512, Networks: []*structs.NetworkResource{ - &structs.NetworkResource{ + { ReservedPorts: []structs.Port{{Label: "main", Value: 22000}, {Label: "web", Value: 80}}, }, }, @@ -142,7 +142,7 @@ func TestQemuDriverUser(t *testing.T) { CPU: 500, MemoryMB: 512, Networks: []*structs.NetworkResource{ - &structs.NetworkResource{ + { ReservedPorts: []structs.Port{{Label: "main", Value: 22000}, {Label: "web", Value: 80}}, }, }, diff --git a/client/driver/raw_exec.go b/client/driver/raw_exec.go index c12c8cdbe..9e0c34465 100644 --- a/client/driver/raw_exec.go +++ b/client/driver/raw_exec.go @@ -64,11 +64,11 @@ func (d *RawExecDriver) Validate(config map[string]interface{}) error { fd := &fields.FieldData{ Raw: config, Schema: map[string]*fields.FieldSchema{ - "command": &fields.FieldSchema{ + "command": { Type: fields.TypeString, Required: true, }, - "args": &fields.FieldSchema{ + "args": { Type: fields.TypeArray, }, }, diff --git a/client/driver/rkt.go b/client/driver/rkt.go index 15791fdfe..4d551f8c3 100644 --- a/client/driver/rkt.go +++ b/client/driver/rkt.go @@ -126,41 +126,41 @@ func (d *RktDriver) Validate(config map[string]interface{}) error { fd := &fields.FieldData{ Raw: config, Schema: map[string]*fields.FieldSchema{ - "image": &fields.FieldSchema{ + "image": { Type: fields.TypeString, Required: true, }, - "command": &fields.FieldSchema{ + "command": { Type: fields.TypeString, }, - "args": &fields.FieldSchema{ + "args": { Type: fields.TypeArray, }, - "trust_prefix": &fields.FieldSchema{ + "trust_prefix": { Type: fields.TypeString, }, - "dns_servers": &fields.FieldSchema{ + "dns_servers": { Type: fields.TypeArray, }, - "dns_search_domains": &fields.FieldSchema{ + "dns_search_domains": { Type: fields.TypeArray, }, - "net": &fields.FieldSchema{ + "net": { Type: fields.TypeArray, }, - "port_map": &fields.FieldSchema{ + "port_map": { Type: fields.TypeArray, }, - "debug": &fields.FieldSchema{ + "debug": { Type: fields.TypeBool, }, - "volumes": &fields.FieldSchema{ + "volumes": { Type: fields.TypeArray, }, - "no_overlay": &fields.FieldSchema{ + "no_overlay": { Type: fields.TypeBool, }, - "insecure_options": &fields.FieldSchema{ + "insecure_options": { Type: fields.TypeArray, }, }, diff --git a/client/driver/rkt_test.go b/client/driver/rkt_test.go index 896fc9036..1af4564df 100644 --- a/client/driver/rkt_test.go +++ b/client/driver/rkt_test.go @@ -451,7 +451,7 @@ func TestRktDriver_PortsMapping(t *testing.T) { "image": "docker://redis:latest", "args": []string{"--version"}, "port_map": []map[string]string{ - map[string]string{ + { "main": "6379-tcp", }, }, @@ -465,7 +465,7 @@ func TestRktDriver_PortsMapping(t *testing.T) { MemoryMB: 256, CPU: 512, Networks: []*structs.NetworkResource{ - &structs.NetworkResource{ + { IP: "127.0.0.1", ReservedPorts: []structs.Port{{Label: "main", Value: 8080}}, }, diff --git a/client/driver/utils_unix.go b/client/driver/utils_unix.go index 397641e3e..474cdcf17 100644 --- a/client/driver/utils_unix.go +++ b/client/driver/utils_unix.go @@ -16,11 +16,3 @@ func isolateCommand(cmd *exec.Cmd) { } cmd.SysProcAttr.Setsid = true } - -// setChroot on a command -func setChroot(cmd *exec.Cmd, chroot string) { - if cmd.SysProcAttr == nil { - cmd.SysProcAttr 
= &syscall.SysProcAttr{} - } - cmd.SysProcAttr.Chroot = chroot -} diff --git a/client/driver/utils_windows.go b/client/driver/utils_windows.go index ff56089d1..5b2b7d842 100644 --- a/client/driver/utils_windows.go +++ b/client/driver/utils_windows.go @@ -7,7 +7,3 @@ import ( // TODO Figure out if this is needed in Wondows func isolateCommand(cmd *exec.Cmd) { } - -// setChroot is a noop on Windows -func setChroot(cmd *exec.Cmd, chroot string) { -} diff --git a/client/fingerprint/env_aws.go b/client/fingerprint/env_aws.go index aafd21fc9..cd1ff888b 100644 --- a/client/fingerprint/env_aws.go +++ b/client/fingerprint/env_aws.go @@ -236,10 +236,15 @@ func (f *EnvAWSFingerprint) linkSpeed() int { } res, err := client.Get(metadataURL + "instance-type") + if err != nil { + f.logger.Printf("[ERR]: fingerprint.env_aws: Error reading instance-type: %v", err) + return 0 + } + body, err := ioutil.ReadAll(res.Body) res.Body.Close() if err != nil { - f.logger.Printf("[ERR]: fingerprint.env_aws: Error reading response body for instance-type") + f.logger.Printf("[ERR]: fingerprint.env_aws: Error reading response body for instance-type: %v", err) return 0 } diff --git a/client/fingerprint/env_gce.go b/client/fingerprint/env_gce.go index 506412e70..83da63486 100644 --- a/client/fingerprint/env_gce.go +++ b/client/fingerprint/env_gce.go @@ -167,7 +167,7 @@ func (f *EnvGCEFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) if unique { key = structs.UniqueNamespace(key) } - node.Attributes[key] = strings.Trim(string(value), "\n") + node.Attributes[key] = strings.Trim(value, "\n") } // These keys need everything before the final slash removed to be usable. @@ -190,18 +190,23 @@ func (f *EnvGCEFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) // Get internal and external IPs (if they exist) value, err := f.Get("network-interfaces/", true) - var interfaces []GCEMetadataNetworkInterface - if err := json.Unmarshal([]byte(value), &interfaces); err != nil { - f.logger.Printf("[WARN] fingerprint.env_gce: Error decoding network interface information: %s", err.Error()) - } + if err != nil { + f.logger.Printf("[WARN] fingerprint.env_gce: Error retrieving network interface information: %s", err) + } else { - for _, intf := range interfaces { - prefix := "platform.gce.network." + lastToken(intf.Network) - uniquePrefix := "unique." + prefix - node.Attributes[prefix] = "true" - node.Attributes[uniquePrefix+".ip"] = strings.Trim(intf.Ip, "\n") - for index, accessConfig := range intf.AccessConfigs { - node.Attributes[uniquePrefix+".external-ip."+strconv.Itoa(index)] = accessConfig.ExternalIp + var interfaces []GCEMetadataNetworkInterface + if err := json.Unmarshal([]byte(value), &interfaces); err != nil { + f.logger.Printf("[WARN] fingerprint.env_gce: Error decoding network interface information: %s", err.Error()) + } + + for _, intf := range interfaces { + prefix := "platform.gce.network." + lastToken(intf.Network) + uniquePrefix := "unique." 
+ prefix + node.Attributes[prefix] = "true" + node.Attributes[uniquePrefix+".ip"] = strings.Trim(intf.Ip, "\n") + for index, accessConfig := range intf.AccessConfigs { + node.Attributes[uniquePrefix+".external-ip."+strconv.Itoa(index)] = accessConfig.ExternalIp + } } } diff --git a/client/fingerprint/vault.go b/client/fingerprint/vault.go index acc331c0f..a8728fc98 100644 --- a/client/fingerprint/vault.go +++ b/client/fingerprint/vault.go @@ -63,7 +63,7 @@ func (f *VaultFingerprint) Fingerprint(config *client.Config, node *structs.Node } node.Attributes["vault.accessible"] = strconv.FormatBool(true) - // We strip the Vault prefix becasue < 0.6.2 the version looks like: + // We strip the Vault prefix because < 0.6.2 the version looks like: // status.Version = "Vault v0.6.1" node.Attributes["vault.version"] = strings.TrimPrefix(status.Version, "Vault ") node.Attributes["vault.cluster_id"] = status.ClusterID diff --git a/client/restarts.go b/client/restarts.go index c403b6f05..5fd3216ce 100644 --- a/client/restarts.go +++ b/client/restarts.go @@ -201,9 +201,3 @@ func (r *RestartTracker) jitter() time.Duration { j := float64(r.rand.Int63n(d)) * jitter return time.Duration(d + int64(j)) } - -// Returns a tracker that never restarts. -func noRestartsTracker() *RestartTracker { - policy := &structs.RestartPolicy{Attempts: 0, Mode: structs.RestartPolicyModeFail} - return newRestartTracker(policy, structs.JobTypeBatch) -} diff --git a/client/state_database.go b/client/state_database.go index 4a5784a6a..a9a36a5f9 100644 --- a/client/state_database.go +++ b/client/state_database.go @@ -108,17 +108,6 @@ func getAllocationBucket(tx *bolt.Tx, allocID string) (*bolt.Bucket, error) { return alloc, nil } -func allocationBucketExists(tx *bolt.Tx, allocID string) bool { - allocations := tx.Bucket(allocationsBucket) - if allocations == nil { - return false - } - - // Retrieve the specific allocations bucket - alloc := allocations.Bucket([]byte(allocID)) - return alloc != nil -} - // getTaskBucket returns the bucket used to persist state about a // particular task. 
 // particular task. If the root allocation bucket, the specific
 // allocation or task bucket doesn't exist, they will be created as long as the
diff --git a/client/stats/cpu.go b/client/stats/cpu.go
index 3e98048e9..47a9ee3cc 100644
--- a/client/stats/cpu.go
+++ b/client/stats/cpu.go
@@ -11,7 +11,6 @@ import (
 type CpuStats struct {
 	prevCpuTime float64
 	prevTime    time.Time
-	clkSpeed    float64
 	totalCpus   int
 }
diff --git a/client/stats/host.go b/client/stats/host.go
index d528ca784..d284973e7 100644
--- a/client/stats/host.go
+++ b/client/stats/host.go
@@ -61,7 +61,6 @@ type NodeStatsCollector interface {
 
 // HostStatsCollector collects host resource usage stats
 type HostStatsCollector struct {
-	clkSpeed        float64
 	numCores        int
 	statsCalculator map[string]*HostCpuStatsCalculator
 	logger          *log.Logger
diff --git a/client/task_runner.go b/client/task_runner.go
index a5d96726e..4ab4b4f1f 100644
--- a/client/task_runner.go
+++ b/client/task_runner.go
@@ -268,14 +268,33 @@ func NewTaskRunner(logger *log.Logger, config *config.Config,
 		signalCh: make(chan SignalEvent),
 	}
 
-	tc.baseLabels = []metrics.Label{{"job", tc.alloc.Job.Name}, {"task_group", tc.alloc.TaskGroup}, {"alloc_id", tc.alloc.ID}, {"task", tc.task.Name}}
+	tc.baseLabels = []metrics.Label{
+		{
+			Name:  "job",
+			Value: tc.alloc.Job.Name,
+		},
+		{
+			Name:  "task_group",
+			Value: tc.alloc.TaskGroup,
+		},
+		{
+			Name:  "alloc_id",
+			Value: tc.alloc.ID,
+		},
+		{
+			Name:  "task",
+			Value: tc.task.Name,
+		},
+	}
 
 	return tc
 }
 
 // MarkReceived marks the task as received.
 func (r *TaskRunner) MarkReceived() {
-	r.updater(r.task.Name, structs.TaskStatePending, structs.NewTaskEvent(structs.TaskReceived), false)
+	// We lazily sync this since there will be a follow-up message almost
+	// immediately.
+	r.updater(r.task.Name, structs.TaskStatePending, structs.NewTaskEvent(structs.TaskReceived), true)
 }
 
 // WaitCh returns a channel to wait for termination
@@ -1677,7 +1696,7 @@ func (r *TaskRunner) handleDestroy(handle driver.DriverHandle) (destroyed bool,
 			r.logger.Printf("[ERR] client: failed to kill task '%s' for alloc %q. Retrying in %v: %v",
 				r.task.Name, r.alloc.ID, backoff, err)
 
-			time.Sleep(time.Duration(backoff))
+			time.Sleep(backoff)
 		} else {
 			// Kill was successful
 			return true, nil
diff --git a/client/task_runner_test.go b/client/task_runner_test.go
index f532e77df..d12369307 100644
--- a/client/task_runner_test.go
+++ b/client/task_runner_test.go
@@ -39,6 +39,12 @@ func prefixedTestLogger(prefix string) *log.Logger {
 	return log.New(ioutil.Discard, "", 0)
 }
 
+// Returns a tracker that never restarts.
+func noRestartsTracker() *RestartTracker {
+	policy := &structs.RestartPolicy{Attempts: 0, Mode: structs.RestartPolicyModeFail}
+	return newRestartTracker(policy, structs.JobTypeBatch)
+}
+
 type MockTaskStateUpdater struct {
 	state  string
 	failed bool
@@ -1073,10 +1079,6 @@ func TestTaskRunner_DeriveToken_Unrecoverable(t *testing.T) {
 
 func TestTaskRunner_Template_Block(t *testing.T) {
 	t.Parallel()
-	testRetryRate = 2 * time.Second
-	defer func() {
-		testRetryRate = 0
-	}()
 	alloc := mock.Alloc()
 	task := alloc.Job.TaskGroups[0].Tasks[0]
 	task.Driver = "mock_driver"
diff --git a/client/util_test.go b/client/util_test.go
index 7e1722027..4022ba3e9 100644
--- a/client/util_test.go
+++ b/client/util_test.go
@@ -29,7 +29,7 @@ func TestDiffAllocs(t *testing.T) {
 			alloc4.ID: alloc4,
 		},
 		filtered: map[string]struct{}{
-			alloc1.ID: struct{}{},
+			alloc1.ID: {},
 		},
 	}
 
diff --git a/client/vaultclient/vaultclient.go b/client/vaultclient/vaultclient.go
index cb17ced33..7fe7958ed 100644
--- a/client/vaultclient/vaultclient.go
+++ b/client/vaultclient/vaultclient.go
@@ -65,9 +65,6 @@ type vaultClient struct {
 	// running indicates if the renewal loop is active or not
 	running bool
 
-	// tokenData is the data of the passed VaultClient token
-	token *tokenData
-
 	// client is the API client to interact with vault
 	client *vaultapi.Client
 
@@ -88,17 +85,6 @@ type vaultClient struct {
 	logger *log.Logger
 }
 
-// tokenData holds the relevant information about the Vault token passed to the
-// client.
-type tokenData struct {
-	CreationTTL int      `mapstructure:"creation_ttl"`
-	TTL         int      `mapstructure:"ttl"`
-	Renewable   bool     `mapstructure:"renewable"`
-	Policies    []string `mapstructure:"policies"`
-	Role        string   `mapstructure:"role"`
-	Root        bool
-}
-
 // vaultClientRenewalRequest is a request object for renewal of both tokens and
 // secret's leases.
type vaultClientRenewalRequest struct { diff --git a/command/agent/agent.go b/command/agent/agent.go index c33d587f8..6806409eb 100644 --- a/command/agent/agent.go +++ b/command/agent/agent.go @@ -25,10 +25,8 @@ import ( ) const ( - clientHttpCheckInterval = 10 * time.Second - clientHttpCheckTimeout = 3 * time.Second - serverHttpCheckInterval = 10 * time.Second - serverHttpCheckTimeout = 6 * time.Second + agentHttpCheckInterval = 10 * time.Second + agentHttpCheckTimeout = 5 * time.Second serverRpcCheckInterval = 10 * time.Second serverRpcCheckTimeout = 3 * time.Second serverSerfCheckInterval = 10 * time.Second @@ -419,7 +417,7 @@ func (a *Agent) setupServer() error { PortLabel: a.config.AdvertiseAddrs.RPC, Tags: []string{consul.ServiceTagRPC}, Checks: []*structs.ServiceCheck{ - &structs.ServiceCheck{ + { Name: "Nomad Server RPC Check", Type: "tcp", Interval: serverRpcCheckInterval, @@ -433,7 +431,7 @@ func (a *Agent) setupServer() error { PortLabel: a.config.AdvertiseAddrs.Serf, Tags: []string{consul.ServiceTagSerf}, Checks: []*structs.ServiceCheck{ - &structs.ServiceCheck{ + { Name: "Nomad Server Serf Check", Type: "tcp", Interval: serverSerfCheckInterval, @@ -538,8 +536,8 @@ func (a *Agent) agentHTTPCheck(server bool) *structs.ServiceCheck { Type: "http", Path: "/v1/agent/servers", Protocol: "http", - Interval: clientHttpCheckInterval, - Timeout: clientHttpCheckTimeout, + Interval: agentHttpCheckInterval, + Timeout: agentHttpCheckTimeout, PortLabel: httpCheckAddr, } // Switch to endpoint that doesn't require a leader for servers diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index 99e3c3953..3bfb4a618 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -135,17 +135,12 @@ func TestHTTP_AgentSetServers(t *testing.T) { t.Parallel() assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { - // Establish a baseline number of servers - req, err := http.NewRequest("GET", "/v1/agent/servers", nil) - assert.Nil(err) - respW := httptest.NewRecorder() - // Create the request - req, err = http.NewRequest("PUT", "/v1/agent/servers", nil) + req, err := http.NewRequest("PUT", "/v1/agent/servers", nil) assert.Nil(err) // Send the request - respW = httptest.NewRecorder() + respW := httptest.NewRecorder() _, err = s.Server.AgentServersRequest(respW, req) assert.NotNil(err) assert.Contains(err.Error(), "missing server address") diff --git a/command/agent/config_parse_test.go b/command/agent/config_parse_test.go index 2d5902742..15ad5dc16 100644 --- a/command/agent/config_parse_test.go +++ b/command/agent/config_parse_test.go @@ -179,12 +179,12 @@ func TestConfig_Parse(t *testing.T) { }, Sentinel: &config.SentinelConfig{ Imports: []*config.SentinelImport{ - &config.SentinelImport{ + { Name: "foo", Path: "foo", Args: []string{"a", "b", "c"}, }, - &config.SentinelImport{ + { Name: "bar", Path: "bar", Args: []string{"x", "y", "z"}, diff --git a/command/agent/config_test.go b/command/agent/config_test.go index d33c239df..6b4f4cf94 100644 --- a/command/agent/config_test.go +++ b/command/agent/config_test.go @@ -315,7 +315,7 @@ func TestConfig_Merge(t *testing.T) { }, Sentinel: &config.SentinelConfig{ Imports: []*config.SentinelImport{ - &config.SentinelImport{ + { Name: "foo", Path: "foo", Args: []string{"a", "b", "c"}, diff --git a/command/agent/consul/client.go b/command/agent/consul/client.go index 5116a3665..1f8dba93d 100644 --- a/command/agent/consul/client.go +++ b/command/agent/consul/client.go @@ -71,10 +71,6 @@ 
type AgentAPI interface { UpdateTTL(id, output, status string) error } -// addrParser is usually the Task.FindHostAndPortFor method for turning a -// portLabel into an address and port. -type addrParser func(portLabel string) (string, int) - // operations are submitted to the main loop via commit() for synchronizing // with Consul. type operations struct { diff --git a/command/agent/consul/int_test.go b/command/agent/consul/int_test.go index f97f86498..0d4087707 100644 --- a/command/agent/consul/int_test.go +++ b/command/agent/consul/int_test.go @@ -154,6 +154,10 @@ func TestConsul_Integration(t *testing.T) { // Block waiting for the service to appear catalog := consulClient.Catalog() res, meta, err := catalog.Service("httpd2", "test", nil) + if err != nil { + t.Fatalf("bad: %v", err) + } + for i := 0; len(res) == 0 && i < 10; i++ { //Expected initial request to fail, do a blocking query res, meta, err = catalog.Service("httpd2", "test", &consulapi.QueryOptions{WaitIndex: meta.LastIndex + 1, WaitTime: 3 * time.Second}) diff --git a/command/agent/fs_endpoint.go b/command/agent/fs_endpoint.go index 62fcd950c..5bb5f8efb 100644 --- a/command/agent/fs_endpoint.go +++ b/command/agent/fs_endpoint.go @@ -1004,7 +1004,7 @@ func findClosest(entries []*allocdir.AllocFileInfo, desiredIdx, desiredOffset in } // Binary search the indexes to get the desiredIdx - sort.Sort(indexTupleArray(indexes)) + sort.Sort(indexes) i := sort.Search(len(indexes), func(i int) bool { return indexes[i].idx >= desiredIdx }) l := len(indexes) if i == l { diff --git a/command/agent/fs_endpoint_test.go b/command/agent/fs_endpoint_test.go index 2368f27fe..d484cdaad 100644 --- a/command/agent/fs_endpoint_test.go +++ b/command/agent/fs_endpoint_test.go @@ -334,7 +334,7 @@ func TestStreamFramer_Order(t *testing.T) { } expected := bytes.NewBuffer(make([]byte, 0, 100000)) - for _, _ = range files { + for range files { expected.Write(input.Bytes()) } receivedBuf := bytes.NewBuffer(make([]byte, 0, 100000)) @@ -424,7 +424,7 @@ func TestStreamFramer_Order_PlainText(t *testing.T) { } expected := bytes.NewBuffer(make([]byte, 0, 100000)) - for _, _ = range files { + for range files { expected.Write(input.Bytes()) } receivedBuf := bytes.NewBuffer(make([]byte, 0, 100000)) diff --git a/command/agent/job_endpoint_test.go b/command/agent/job_endpoint_test.go index 9cf62f5e6..a8083950f 100644 --- a/command/agent/job_endpoint_test.go +++ b/command/agent/job_endpoint_test.go @@ -1394,13 +1394,13 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { "hello": "world", }, Services: []*structs.Service{ - &structs.Service{ + { Name: "serviceA", Tags: []string{"1", "2"}, PortLabel: "foo", AddressMode: "auto", Checks: []*structs.ServiceCheck{ - &structs.ServiceCheck{ + { Name: "bar", Type: "http", Command: "foo", diff --git a/command/agent/log_writer.go b/command/agent/log_writer.go index e6e76d0e8..ebb96878b 100644 --- a/command/agent/log_writer.go +++ b/command/agent/log_writer.go @@ -76,7 +76,7 @@ func (l *logWriter) Write(p []byte) (n int, err error) { l.logs[l.index] = string(p) l.index = (l.index + 1) % len(l.logs) - for lh, _ := range l.handlers { + for lh := range l.handlers { lh.HandleLog(string(p)) } return diff --git a/command/alloc_status.go b/command/alloc_status.go index 2cfbdea84..2f206f40b 100644 --- a/command/alloc_status.go +++ b/command/alloc_status.go @@ -9,7 +9,6 @@ import ( "time" humanize "github.com/dustin/go-humanize" - "github.com/mitchellh/colorstring" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/api/contexts" @@ 
-19,7 +18,6 @@ import ( type AllocStatusCommand struct { Meta - color *colorstring.Colorize } func (c *AllocStatusCommand) Help() string { diff --git a/command/eval_status.go b/command/eval_status.go index 679b7b16b..448b74776 100644 --- a/command/eval_status.go +++ b/command/eval_status.go @@ -247,7 +247,7 @@ func (c *EvalStatusCommand) Run(args []string) int { func sortedTaskGroupFromMetrics(groups map[string]*api.AllocationMetric) []string { tgs := make([]string, 0, len(groups)) - for tg, _ := range groups { + for tg := range groups { tgs = append(tgs, tg) } sort.Strings(tgs) diff --git a/command/meta.go b/command/meta.go index 18a77cd32..de1addd11 100644 --- a/command/meta.go +++ b/command/meta.go @@ -47,7 +47,7 @@ type Meta struct { // namespace to send API requests namespace string - // token is used for ACLs to access privilaged information + // token is used for ACLs to access privileged information token string caCert string diff --git a/command/monitor.go b/command/monitor.go index 1a4937a1c..a8de3b404 100644 --- a/command/monitor.go +++ b/command/monitor.go @@ -49,11 +49,6 @@ type allocState struct { client string clientDesc string index uint64 - - // full is the allocation struct with full details. This - // must be queried for explicitly so it is only included - // if there is important error information inside. - full *api.Allocation } // monitor wraps an evaluation monitor and holds metadata and @@ -328,17 +323,6 @@ func (m *monitor) monitor(evalID string, allowPrefix bool) int { return 0 } -// dumpAllocStatus is a helper to generate a more user-friendly error message -// for scheduling failures, displaying a high level status of why the job -// could not be scheduled out. -func dumpAllocStatus(ui cli.Ui, alloc *api.Allocation, length int) { - // Print filter stats - ui.Output(fmt.Sprintf("Allocation %q status %q (%d/%d nodes filtered)", - limit(alloc.ID, length), alloc.ClientStatus, - alloc.Metrics.NodesFiltered, alloc.Metrics.NodesEvaluated)) - ui.Output(formatAllocMetrics(alloc.Metrics, true, " ")) -} - func formatAllocMetrics(metrics *api.AllocationMetric, scores bool, prefix string) string { // Print a helpful message if we have an eligibility problem var out string diff --git a/command/monitor_test.go b/command/monitor_test.go index 53e7c5e58..96a122131 100644 --- a/command/monitor_test.go +++ b/command/monitor_test.go @@ -5,7 +5,6 @@ import ( "testing" "time" - "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" ) @@ -72,7 +71,7 @@ func TestMonitor_Update_Allocs(t *testing.T) { // New allocations write new logs state := &evalState{ allocs: map[string]*allocState{ - "alloc1": &allocState{ + "alloc1": { id: "87654321-abcd-efab-cdef-123456789abc", group: "group1", node: "12345678-abcd-efab-cdef-123456789abc", @@ -110,7 +109,7 @@ func TestMonitor_Update_Allocs(t *testing.T) { // Alloc updates cause more log lines state = &evalState{ allocs: map[string]*allocState{ - "alloc1": &allocState{ + "alloc1": { id: "87654321-abcd-efab-cdef-123456789abc", group: "group1", node: "12345678-abcd-efab-cdef-123456789abc", @@ -145,7 +144,7 @@ func TestMonitor_Update_AllocModification(t *testing.T) { state := &evalState{ index: 2, allocs: map[string]*allocState{ - "alloc3": &allocState{ + "alloc3": { id: "87654321-abcd-bafe-cdef-123456789abc", node: "12345678-abcd-efab-cdef-123456789abc", group: "group2", @@ -286,72 +285,3 @@ func TestMonitor_MonitorWithPrefix(t *testing.T) { } } - -func TestMonitor_DumpAllocStatus(t *testing.T) { - 
t.Parallel() - ui := new(cli.MockUi) - - // Create an allocation and dump its status to the UI - alloc := &api.Allocation{ - ID: "87654321-abcd-efab-cdef-123456789abc", - TaskGroup: "group1", - ClientStatus: structs.AllocClientStatusRunning, - Metrics: &api.AllocationMetric{ - NodesEvaluated: 10, - NodesFiltered: 5, - NodesExhausted: 1, - DimensionExhausted: map[string]int{ - "cpu": 1, - }, - ConstraintFiltered: map[string]int{ - "$attr.kernel.name = linux": 1, - }, - ClassExhausted: map[string]int{ - "web-large": 1, - }, - }, - } - dumpAllocStatus(ui, alloc, fullId) - - // Check the output - out := ui.OutputWriter.String() - if !strings.Contains(out, "87654321-abcd-efab-cdef-123456789abc") { - t.Fatalf("missing alloc\n\n%s", out) - } - if !strings.Contains(out, structs.AllocClientStatusRunning) { - t.Fatalf("missing status\n\n%s", out) - } - if !strings.Contains(out, "5/10") { - t.Fatalf("missing filter stats\n\n%s", out) - } - if !strings.Contains( - out, `Constraint "$attr.kernel.name = linux" filtered 1 nodes`) { - t.Fatalf("missing constraint\n\n%s", out) - } - if !strings.Contains(out, "Resources exhausted on 1 nodes") { - t.Fatalf("missing resource exhaustion\n\n%s", out) - } - if !strings.Contains(out, `Class "web-large" exhausted on 1 nodes`) { - t.Fatalf("missing class exhaustion\n\n%s", out) - } - if !strings.Contains(out, `Dimension "cpu" exhausted on 1 nodes`) { - t.Fatalf("missing dimension exhaustion\n\n%s", out) - } - ui.OutputWriter.Reset() - - // Dumping alloc status with no eligible nodes adds a warning - alloc.Metrics.NodesEvaluated = 0 - dumpAllocStatus(ui, alloc, shortId) - - // Check the output - out = ui.OutputWriter.String() - if !strings.Contains(out, "No nodes were eligible") { - t.Fatalf("missing eligibility warning\n\n%s", out) - } - if strings.Contains(out, "87654321-abcd-efab-cdef-123456789abc") { - t.Fatalf("expected truncated id, got %s", out) - } - if !strings.Contains(out, "87654321") { - t.Fatalf("expected alloc id, got %s", out) - } -} diff --git a/command/namespace_delete.go b/command/namespace_delete.go index cc5c02e47..5f9e11447 100644 --- a/command/namespace_delete.go +++ b/command/namespace_delete.go @@ -29,7 +29,7 @@ func (c *NamespaceDeleteCommand) AutocompleteFlags() complete.Flags { } func (c *NamespaceDeleteCommand) AutocompleteArgs() complete.Predictor { - filter := map[string]struct{}{"default": struct{}{}} + filter := map[string]struct{}{"default": {}} return NamespacePredictor(c.Meta.Client, filter) } diff --git a/command/node_drain.go b/command/node_drain.go index e3cf3db73..b64b13ad5 100644 --- a/command/node_drain.go +++ b/command/node_drain.go @@ -105,7 +105,7 @@ func (c *NodeDrainCommand) Run(args []string) int { } // If -self flag is set then determine the current node. 
- nodeID := "" + var nodeID string if !self { nodeID = args[0] } else { diff --git a/command/node_status.go b/command/node_status.go index 8ccbd32e5..e14ea6fca 100644 --- a/command/node_status.go +++ b/command/node_status.go @@ -8,7 +8,6 @@ import ( "time" humanize "github.com/dustin/go-humanize" - "github.com/mitchellh/colorstring" "github.com/posener/complete" "github.com/hashicorp/nomad/api" @@ -26,7 +25,6 @@ const ( type NodeStatusCommand struct { Meta - color *colorstring.Colorize length int short bool verbose bool @@ -221,7 +219,7 @@ func (c *NodeStatusCommand) Run(args []string) int { } // Query the specific node - nodeID := "" + var nodeID string if !c.self { nodeID = args[0] } else { diff --git a/command/plan.go b/command/plan.go index b712448fd..e76fac0cf 100644 --- a/command/plan.go +++ b/command/plan.go @@ -8,7 +8,6 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/scheduler" - "github.com/mitchellh/colorstring" "github.com/posener/complete" ) @@ -26,7 +25,6 @@ potentially invalid.` type PlanCommand struct { Meta JobGetter - color *colorstring.Colorize } func (c *PlanCommand) Help() string { diff --git a/command/run.go b/command/run.go index 4c0195fca..e4d691239 100644 --- a/command/run.go +++ b/command/run.go @@ -1,8 +1,6 @@ package command import ( - "bytes" - "encoding/gob" "encoding/json" "fmt" "os" @@ -13,7 +11,6 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/helper" - "github.com/hashicorp/nomad/nomad/structs" "github.com/posener/complete" ) @@ -280,19 +277,3 @@ func parseCheckIndex(input string) (uint64, bool, error) { u, err := strconv.ParseUint(input, 10, 64) return u, true, err } - -// convertStructJob is used to take a *structs.Job and convert it to an *api.Job. -// This function is just a hammer and probably needs to be revisited. 
-func convertStructJob(in *structs.Job) (*api.Job, error) { - gob.Register([]map[string]interface{}{}) - gob.Register([]interface{}{}) - var apiJob *api.Job - buf := new(bytes.Buffer) - if err := gob.NewEncoder(buf).Encode(in); err != nil { - return nil, err - } - if err := gob.NewDecoder(buf).Decode(&apiJob); err != nil { - return nil, err - } - return apiJob, nil -} diff --git a/demo/digitalocean/app/bench.go b/demo/digitalocean/app/bench.go index 427803b3b..a8c2f9706 100644 --- a/demo/digitalocean/app/bench.go +++ b/demo/digitalocean/app/bench.go @@ -19,7 +19,7 @@ func main() { return } - total := 0 + var total int if len(os.Args) != 2 { fmt.Println("need 1 arg") return diff --git a/helper/fields/data_test.go b/helper/fields/data_test.go index 6951261cd..01285ec2e 100644 --- a/helper/fields/data_test.go +++ b/helper/fields/data_test.go @@ -14,7 +14,7 @@ func TestFieldDataGet(t *testing.T) { }{ "string type, string value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeString}, + "foo": {Type: TypeString}, }, map[string]interface{}{ "foo": "bar", @@ -25,7 +25,7 @@ func TestFieldDataGet(t *testing.T) { "string type, int value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeInt}, + "foo": {Type: TypeInt}, }, map[string]interface{}{ "foo": 42, @@ -36,7 +36,7 @@ func TestFieldDataGet(t *testing.T) { "string type, unset value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeString}, + "foo": {Type: TypeString}, }, map[string]interface{}{}, "foo", @@ -45,7 +45,7 @@ func TestFieldDataGet(t *testing.T) { "string type, unset value with default": { map[string]*FieldSchema{ - "foo": &FieldSchema{ + "foo": { Type: TypeString, Default: "bar", }, @@ -57,7 +57,7 @@ func TestFieldDataGet(t *testing.T) { "int type, int value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeInt}, + "foo": {Type: TypeInt}, }, map[string]interface{}{ "foo": 42, @@ -68,7 +68,7 @@ func TestFieldDataGet(t *testing.T) { "bool type, bool value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeBool}, + "foo": {Type: TypeBool}, }, map[string]interface{}{ "foo": false, @@ -79,7 +79,7 @@ func TestFieldDataGet(t *testing.T) { "map type, map value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeMap}, + "foo": {Type: TypeMap}, }, map[string]interface{}{ "foo": map[string]interface{}{ @@ -94,7 +94,7 @@ func TestFieldDataGet(t *testing.T) { "array type, array value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeArray}, + "foo": {Type: TypeArray}, }, map[string]interface{}{ "foo": []interface{}{}, diff --git a/helper/flatmap/flatmap.go b/helper/flatmap/flatmap.go index 622ef7658..4f4ec0dfd 100644 --- a/helper/flatmap/flatmap.go +++ b/helper/flatmap/flatmap.go @@ -113,23 +113,17 @@ func flatten(prefix string, v reflect.Value, primitiveOnly, enteredStruct bool, // getSubPrefix takes the current prefix and the next subfield and returns an // appropriate prefix. func getSubPrefix(curPrefix, subField string) string { - newPrefix := "" if curPrefix != "" { - newPrefix = fmt.Sprintf("%s.%s", curPrefix, subField) - } else { - newPrefix = fmt.Sprintf("%s", subField) + return fmt.Sprintf("%s.%s", curPrefix, subField) } - return newPrefix + return fmt.Sprintf("%s", subField) } // getSubKeyPrefix takes the current prefix and the next subfield and returns an // appropriate prefix for a map field. 
func getSubKeyPrefix(curPrefix, subField string) string { - newPrefix := "" if curPrefix != "" { - newPrefix = fmt.Sprintf("%s[%s]", curPrefix, subField) - } else { - newPrefix = fmt.Sprintf("%s", subField) + return fmt.Sprintf("%s[%s]", curPrefix, subField) } - return newPrefix + return fmt.Sprintf("%s", subField) } diff --git a/helper/flatmap/flatmap_test.go b/helper/flatmap/flatmap_test.go index 125155152..821060ff8 100644 --- a/helper/flatmap/flatmap_test.go +++ b/helper/flatmap/flatmap_test.go @@ -167,10 +167,10 @@ func TestFlatMap(t *testing.T) { Input: &containers{ myslice: []int{1, 2}, mymap: map[string]linkedList{ - "foo": linkedList{ + "foo": { value: "l1", }, - "bar": linkedList{ + "bar": { value: "l2", }, }, @@ -188,10 +188,10 @@ func TestFlatMap(t *testing.T) { Input: &containers{ myslice: []int{1, 2}, mymap: map[string]linkedList{ - "foo": linkedList{ + "foo": { value: "l1", }, - "bar": linkedList{ + "bar": { value: "l2", }, }, diff --git a/helper/funcs.go b/helper/funcs.go index 0b0796059..19911941f 100644 --- a/helper/funcs.go +++ b/helper/funcs.go @@ -180,7 +180,7 @@ func CopyMapStringStruct(m map[string]struct{}) map[string]struct{} { } c := make(map[string]struct{}, l) - for k, _ := range m { + for k := range m { c[k] = struct{}{} } return c diff --git a/helper/funcs_test.go b/helper/funcs_test.go index 03bca8a1f..564765c62 100644 --- a/helper/funcs_test.go +++ b/helper/funcs_test.go @@ -23,8 +23,8 @@ func TestSliceStringIsSubset(t *testing.T) { func TestMapStringStringSliceValueSet(t *testing.T) { m := map[string][]string{ - "foo": []string{"1", "2"}, - "bar": []string{"3"}, + "foo": {"1", "2"}, + "bar": {"3"}, "baz": nil, } @@ -38,8 +38,8 @@ func TestMapStringStringSliceValueSet(t *testing.T) { func TestCopyMapStringSliceString(t *testing.T) { m := map[string][]string{ - "x": []string{"a", "b", "c"}, - "y": []string{"1", "2", "3"}, + "x": {"a", "b", "c"}, + "y": {"1", "2", "3"}, "z": nil, } diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go index c0cb4d902..9196f4a3c 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -39,7 +39,7 @@ func TestParse(t *testing.T) { }, Constraints: []*api.Constraint{ - &api.Constraint{ + { LTarget: "kernel.os", RTarget: "windows", Operand: "=", @@ -57,10 +57,10 @@ func TestParse(t *testing.T) { }, TaskGroups: []*api.TaskGroup{ - &api.TaskGroup{ + { Name: helper.StringToPtr("outside"), Tasks: []*api.Task{ - &api.Task{ + { Name: "outside", Driver: "java", Config: map[string]interface{}{ @@ -73,11 +73,11 @@ func TestParse(t *testing.T) { }, }, - &api.TaskGroup{ + { Name: helper.StringToPtr("binsl"), Count: helper.IntToPtr(5), Constraints: []*api.Constraint{ - &api.Constraint{ + { LTarget: "kernel.os", RTarget: "linux", Operand: "=", @@ -107,14 +107,14 @@ func TestParse(t *testing.T) { Canary: helper.IntToPtr(2), }, Tasks: []*api.Task{ - &api.Task{ + { Name: "binstore", Driver: "docker", User: "bob", Config: map[string]interface{}{ "image": "hashicorp/binstore", "labels": []map[string]interface{}{ - map[string]interface{}{ + { "FOO": "bar", }, }, @@ -147,7 +147,7 @@ func TestParse(t *testing.T) { CPU: helper.IntToPtr(500), MemoryMB: helper.IntToPtr(128), Networks: []*api.NetworkResource{ - &api.NetworkResource{ + { MBits: helper.IntToPtr(100), ReservedPorts: []api.Port{{Label: "one", Value: 1}, {Label: "two", Value: 2}, {Label: "three", Value: 3}}, DynamicPorts: []api.Port{{Label: "http", Value: 0}, {Label: "https", Value: 0}, {Label: "admin", Value: 0}}, @@ -204,7 +204,7 @@ func TestParse(t *testing.T) { }, Leader: 
true, }, - &api.Task{ + { Name: "storagelocker", Driver: "docker", User: "", @@ -217,7 +217,7 @@ func TestParse(t *testing.T) { IOPS: helper.IntToPtr(30), }, Constraints: []*api.Constraint{ - &api.Constraint{ + { LTarget: "kernel.arch", RTarget: "amd64", Operand: "=", @@ -270,7 +270,7 @@ func TestParse(t *testing.T) { ID: helper.StringToPtr("foo"), Name: helper.StringToPtr("foo"), Constraints: []*api.Constraint{ - &api.Constraint{ + { LTarget: "$attr.kernel.version", RTarget: "~> 3.2", Operand: structs.ConstraintVersion, @@ -286,7 +286,7 @@ func TestParse(t *testing.T) { ID: helper.StringToPtr("foo"), Name: helper.StringToPtr("foo"), Constraints: []*api.Constraint{ - &api.Constraint{ + { LTarget: "$attr.kernel.version", RTarget: "[0-9.]+", Operand: structs.ConstraintRegex, @@ -302,7 +302,7 @@ func TestParse(t *testing.T) { ID: helper.StringToPtr("foo"), Name: helper.StringToPtr("foo"), Constraints: []*api.Constraint{ - &api.Constraint{ + { LTarget: "$meta.data", RTarget: "foo,bar,baz", Operand: structs.ConstraintSetContains, @@ -318,7 +318,7 @@ func TestParse(t *testing.T) { ID: helper.StringToPtr("foo"), Name: helper.StringToPtr("foo"), Constraints: []*api.Constraint{ - &api.Constraint{ + { Operand: structs.ConstraintDistinctHosts, }, }, @@ -332,7 +332,7 @@ func TestParse(t *testing.T) { ID: helper.StringToPtr("foo"), Name: helper.StringToPtr("foo"), Constraints: []*api.Constraint{ - &api.Constraint{ + { Operand: structs.ConstraintDistinctProperty, LTarget: "${meta.rack}", }, @@ -371,16 +371,16 @@ func TestParse(t *testing.T) { ID: helper.StringToPtr("foo"), Name: helper.StringToPtr("foo"), TaskGroups: []*api.TaskGroup{ - &api.TaskGroup{ + { Name: helper.StringToPtr("bar"), Tasks: []*api.Task{ - &api.Task{ + { Name: "bar", Driver: "docker", Config: map[string]interface{}{ "image": "hashicorp/image", "port_map": []map[string]interface{}{ - map[string]interface{}{ + { "db": 1234, }, }, @@ -405,10 +405,10 @@ func TestParse(t *testing.T) { ID: helper.StringToPtr("binstore-storagelocker"), Name: helper.StringToPtr("binstore-storagelocker"), TaskGroups: []*api.TaskGroup{ - &api.TaskGroup{ + { Name: helper.StringToPtr("binsl"), Tasks: []*api.Task{ - &api.Task{ + { Name: "binstore", Driver: "docker", Artifacts: []*api.TaskArtifact{ @@ -442,11 +442,11 @@ func TestParse(t *testing.T) { Name: helper.StringToPtr("check_initial_status"), Type: helper.StringToPtr("service"), TaskGroups: []*api.TaskGroup{ - &api.TaskGroup{ + { Name: helper.StringToPtr("group"), Count: helper.IntToPtr(1), Tasks: []*api.Task{ - &api.Task{ + { Name: "task", Services: []*api.Service{ { @@ -492,10 +492,10 @@ func TestParse(t *testing.T) { ID: helper.StringToPtr("example"), Name: helper.StringToPtr("example"), TaskGroups: []*api.TaskGroup{ - &api.TaskGroup{ + { Name: helper.StringToPtr("cache"), Tasks: []*api.Task{ - &api.Task{ + { Name: "redis", Vault: &api.Vault{ Policies: []string{"group"}, @@ -503,7 +503,7 @@ func TestParse(t *testing.T) { ChangeMode: helper.StringToPtr(structs.VaultChangeModeRestart), }, }, - &api.Task{ + { Name: "redis2", Vault: &api.Vault{ Policies: []string{"task"}, @@ -513,10 +513,10 @@ func TestParse(t *testing.T) { }, }, }, - &api.TaskGroup{ + { Name: helper.StringToPtr("cache2"), Tasks: []*api.Task{ - &api.Task{ + { Name: "redis", Vault: &api.Vault{ Policies: []string{"job"}, diff --git a/main.go b/main.go index 50d9ac5b4..3296d2442 100644 --- a/main.go +++ b/main.go @@ -27,7 +27,7 @@ func Run(args []string) int { func RunCustom(args []string, commands map[string]cli.CommandFactory) int { // Build 
the commands to include in the help now.
	commandsInclude := make([]string, 0, len(commands))
-	for k, _ := range commands {
+	for k := range commands {
 		switch k {
 		case "deployment list", "deployment status", "deployment pause",
 			"deployment resume", "deployment fail", "deployment promote":
diff --git a/nomad/acl_endpoint.go b/nomad/acl_endpoint.go
index 1cbc13825..c7b56bd11 100644
--- a/nomad/acl_endpoint.go
+++ b/nomad/acl_endpoint.go
@@ -283,7 +283,7 @@ func (a *ACL) Bootstrap(args *structs.ACLTokenBootstrapRequest, reply *structs.A
 	}
 	defer metrics.MeasureSince([]string{"nomad", "acl", "bootstrap"}, time.Now())
 
-	// Always ignore the reset index from the arguements
+	// Always ignore the reset index from the arguments
 	args.ResetIndex = 0
 
 	// Snapshot the state
diff --git a/nomad/acl_testutil_test.go b/nomad/acl_testutil_test.go
index 705290126..2e7e511a2 100644
--- a/nomad/acl_testutil_test.go
+++ b/nomad/acl_testutil_test.go
@@ -11,7 +11,7 @@ import (
 )
 
 // NamespacePolicy is a helper for generating the policy hcl for a given
-// namepsace. Either policy or capabilites may be nil but not both.
+// namespace. Either policy or capabilities may be nil but not both.
 func NamespacePolicy(namespace string, policy string, capabilities []string) string {
 	policyHCL := fmt.Sprintf("namespace %q {", namespace)
 	if policy != "" {
diff --git a/nomad/fsm_test.go b/nomad/fsm_test.go
index bf32749ef..bfd16b4f2 100644
--- a/nomad/fsm_test.go
+++ b/nomad/fsm_test.go
@@ -356,7 +356,7 @@ func TestFSM_RegisterJob_BadNamespace(t *testing.T) {
 	if !ok {
 		t.Fatalf("resp not of error type: %T %v", resp, resp)
 	}
-	if !strings.Contains(err.Error(), "non-existant namespace") {
+	if !strings.Contains(err.Error(), "non-existent namespace") {
 		t.Fatalf("bad error: %v", err)
 	}
 
@@ -1362,11 +1362,11 @@ func TestFSM_DeploymentPromotion(t *testing.T) {
 	d := mock.Deployment()
 	d.JobID = j.ID
 	d.TaskGroups = map[string]*structs.DeploymentState{
-		"web": &structs.DeploymentState{
+		"web": {
 			DesiredTotal:    10,
 			DesiredCanaries: 1,
 		},
-		"foo": &structs.DeploymentState{
+		"foo": {
 			DesiredTotal:    10,
 			DesiredCanaries: 1,
 		},
@@ -2156,7 +2156,7 @@ func TestFSM_SnapshotRestore_AddMissingSummary(t *testing.T) {
 		JobID:     alloc.Job.ID,
 		Namespace: alloc.Job.Namespace,
 		Summary: map[string]structs.TaskGroupSummary{
-			"web": structs.TaskGroupSummary{
+			"web": {
 				Starting: 1,
 			},
 		},
@@ -2210,7 +2210,7 @@ func TestFSM_ReconcileSummaries(t *testing.T) {
 		JobID:     job1.ID,
 		Namespace: job1.Namespace,
 		Summary: map[string]structs.TaskGroupSummary{
-			"web": structs.TaskGroupSummary{
+			"web": {
 				Queued: 10,
 			},
 		},
@@ -2229,7 +2229,7 @@ func TestFSM_ReconcileSummaries(t *testing.T) {
 		JobID:     alloc.Job.ID,
 		Namespace: alloc.Job.Namespace,
 		Summary: map[string]structs.TaskGroupSummary{
-			"web": structs.TaskGroupSummary{
+			"web": {
 				Queued:   9,
 				Starting: 1,
 			},
diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go
index 5cac4b670..d23c74024 100644
--- a/nomad/job_endpoint_test.go
+++ b/nomad/job_endpoint_test.go
@@ -160,7 +160,7 @@ func TestJobEndpoint_Register_InvalidNamespace(t *testing.T) {
 	// Try without a token, expect failure
 	var resp structs.JobRegisterResponse
 	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
-	if err == nil || !strings.Contains(err.Error(), "non-existant namespace") {
+	if err == nil || !strings.Contains(err.Error(), "non-existent namespace") {
 		t.Fatalf("expected namespace error: %v", err)
 	}
 
@@ -2096,7 +2096,7 @@ func TestJobEndpoint_GetJobSummary(t *testing.T) {
 		JobID:     job.ID,
 		Namespace: job.Namespace,
 		Summary:
map[string]structs.TaskGroupSummary{ - "web": structs.TaskGroupSummary{}, + "web": {}, }, Children: new(structs.JobChildrenSummary), CreateIndex: job.CreateIndex, @@ -2158,7 +2158,7 @@ func TestJobEndpoint_Summary_ACL(t *testing.T) { JobID: job.ID, Namespace: job.Namespace, Summary: map[string]structs.TaskGroupSummary{ - "web": structs.TaskGroupSummary{}, + "web": {}, }, Children: new(structs.JobChildrenSummary), CreateIndex: job.CreateIndex, @@ -2248,7 +2248,6 @@ func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) { } start = time.Now() var resp1 structs.JobSummaryResponse - start = time.Now() if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &resp1); err != nil { t.Fatalf("err: %v", err) } @@ -3063,7 +3062,7 @@ func TestJobEndpoint_ImplicitConstraints_Signals(t *testing.T) { job := mock.Job() signal := "SIGUSR1" job.TaskGroups[0].Tasks[0].Templates = []*structs.Template{ - &structs.Template{ + { SourcePath: "foo", DestPath: "bar", ChangeMode: structs.TemplateChangeModeSignal, diff --git a/nomad/mock/mock.go b/nomad/mock/mock.go index d7ac2bc3d..cf11794f1 100644 --- a/nomad/mock/mock.go +++ b/nomad/mock/mock.go @@ -25,7 +25,7 @@ func Node() *structs.Node { DiskMB: 100 * 1024, IOPS: 150, Networks: []*structs.NetworkResource{ - &structs.NetworkResource{ + { Device: "eth0", CIDR: "192.168.0.100/32", MBits: 1000, @@ -37,7 +37,7 @@ func Node() *structs.Node { MemoryMB: 256, DiskMB: 4 * 1024, Networks: []*structs.NetworkResource{ - &structs.NetworkResource{ + { Device: "eth0", IP: "192.168.0.100", ReservedPorts: []structs.Port{{Label: "main", Value: 22}}, @@ -71,14 +71,14 @@ func Job() *structs.Job { AllAtOnce: false, Datacenters: []string{"dc1"}, Constraints: []*structs.Constraint{ - &structs.Constraint{ + { LTarget: "${attr.kernel.name}", RTarget: "linux", Operand: "=", }, }, TaskGroups: []*structs.TaskGroup{ - &structs.TaskGroup{ + { Name: "web", Count: 10, EphemeralDisk: &structs.EphemeralDisk{ @@ -91,7 +91,7 @@ func Job() *structs.Job { Mode: structs.RestartPolicyModeDelay, }, Tasks: []*structs.Task{ - &structs.Task{ + { Name: "web", Driver: "exec", Config: map[string]interface{}{ @@ -126,7 +126,7 @@ func Job() *structs.Job { CPU: 500, MemoryMB: 256, Networks: []*structs.NetworkResource{ - &structs.NetworkResource{ + { MBits: 50, DynamicPorts: []structs.Port{{Label: "http"}, {Label: "admin"}}, }, @@ -168,14 +168,14 @@ func SystemJob() *structs.Job { AllAtOnce: false, Datacenters: []string{"dc1"}, Constraints: []*structs.Constraint{ - &structs.Constraint{ + { LTarget: "${attr.kernel.name}", RTarget: "linux", Operand: "=", }, }, TaskGroups: []*structs.TaskGroup{ - &structs.TaskGroup{ + { Name: "web", Count: 1, RestartPolicy: &structs.RestartPolicy{ @@ -186,7 +186,7 @@ func SystemJob() *structs.Job { }, EphemeralDisk: structs.DefaultEphemeralDisk(), Tasks: []*structs.Task{ - &structs.Task{ + { Name: "web", Driver: "exec", Config: map[string]interface{}{ @@ -197,7 +197,7 @@ func SystemJob() *structs.Job { CPU: 500, MemoryMB: 256, Networks: []*structs.NetworkResource{ - &structs.NetworkResource{ + { MBits: 50, DynamicPorts: []structs.Port{{Label: "http"}}, }, @@ -269,7 +269,7 @@ func Alloc() *structs.Allocation { MemoryMB: 256, DiskMB: 150, Networks: []*structs.NetworkResource{ - &structs.NetworkResource{ + { Device: "eth0", IP: "192.168.0.100", ReservedPorts: []structs.Port{{Label: "main", Value: 5000}}, @@ -279,11 +279,11 @@ func Alloc() *structs.Allocation { }, }, TaskResources: map[string]*structs.Resources{ - "web": &structs.Resources{ + "web": { CPU: 500, MemoryMB: 256, 
Networks: []*structs.NetworkResource{ - &structs.NetworkResource{ + { Device: "eth0", IP: "192.168.0.100", ReservedPorts: []structs.Port{{Label: "main", Value: 5000}}, @@ -323,7 +323,7 @@ func Deployment() *structs.Deployment { JobModifyIndex: 20, JobCreateIndex: 18, TaskGroups: map[string]*structs.DeploymentState{ - "web": &structs.DeploymentState{ + "web": { DesiredTotal: 10, }, }, diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index d20d8157c..ddc55bfe5 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -813,7 +813,7 @@ func TestClientEndpoint_Drain_Down(t *testing.T) { JobID: job.ID, Namespace: job.Namespace, Summary: map[string]structs.TaskGroupSummary{ - "web": structs.TaskGroupSummary{ + "web": { Queued: 1, Lost: 1, }, @@ -834,7 +834,7 @@ func TestClientEndpoint_Drain_Down(t *testing.T) { JobID: job1.ID, Namespace: job1.Namespace, Summary: map[string]structs.TaskGroupSummary{ - "web": structs.TaskGroupSummary{ + "web": { Lost: 1, }, }, diff --git a/nomad/operator_endpoint_test.go b/nomad/operator_endpoint_test.go index 30455fcd0..db1ba9397 100644 --- a/nomad/operator_endpoint_test.go +++ b/nomad/operator_endpoint_test.go @@ -39,7 +39,7 @@ func TestOperator_RaftGetConfiguration(t *testing.T) { me := future.Configuration().Servers[0] expected := structs.RaftConfigurationResponse{ Servers: []*structs.RaftServer{ - &structs.RaftServer{ + { ID: me.ID, Node: fmt.Sprintf("%v.%v", s1.config.NodeName, s1.config.Region), Address: me.Address, diff --git a/nomad/periodic.go b/nomad/periodic.go index a1b358b12..315c37871 100644 --- a/nomad/periodic.go +++ b/nomad/periodic.go @@ -190,7 +190,7 @@ func (p *PeriodicDispatch) Tracked() []*structs.Job { // Add begins tracking of a periodic job. If it is already tracked, it acts as // an update to the jobs periodic spec. The method returns whether the job was -// added and any error that may have occured. +// added and any error that may have occurred. func (p *PeriodicDispatch) Add(job *structs.Job) (added bool, err error) { p.l.Lock() defer p.l.Unlock() diff --git a/nomad/periodic_test.go b/nomad/periodic_test.go index 1db9c07d6..7e3f07658 100644 --- a/nomad/periodic_test.go +++ b/nomad/periodic_test.go @@ -188,7 +188,7 @@ func TestPeriodicDispatch_Add_UpdateJob(t *testing.T) { // Update the job and add it again. job.Periodic.Spec = "foo" if added, err := p.Add(job); err != nil || !added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed: %v %v", added, err) } tracked = p.Tracked() @@ -228,7 +228,7 @@ func TestPeriodicDispatch_Add_RemoveJob(t *testing.T) { p, _ := testPeriodicDispatcher() job := mock.PeriodicJob() if added, err := p.Add(job); err != nil || !added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed %v %v", added, err) } tracked := p.Tracked() @@ -239,7 +239,7 @@ func TestPeriodicDispatch_Add_RemoveJob(t *testing.T) { // Update the job to be non-periodic and add it again. job.Periodic = nil if added, err := p.Add(job); err != nil || added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed %v %v", added, err) } tracked = p.Tracked() @@ -257,14 +257,14 @@ func TestPeriodicDispatch_Add_TriggersUpdate(t *testing.T) { // Add it. if added, err := p.Add(job); err != nil || !added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed %v %v", added, err) } // Update it to be sooner and re-add. 
expected := time.Now().Round(1 * time.Second).Add(1 * time.Second) job.Periodic.Spec = fmt.Sprintf("%d", expected.Unix()) if added, err := p.Add(job); err != nil || !added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed %v %v", added, err) } // Check that nothing is created. @@ -305,7 +305,7 @@ func TestPeriodicDispatch_Remove_Tracked(t *testing.T) { job := mock.PeriodicJob() if added, err := p.Add(job); err != nil || !added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed %v %v", added, err) } tracked := p.Tracked() @@ -332,12 +332,12 @@ func TestPeriodicDispatch_Remove_TriggersUpdate(t *testing.T) { // Add it. if added, err := p.Add(job); err != nil || !added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed %v %v", added, err) } // Remove the job. if err := p.Remove(job.Namespace, job.ID); err != nil { - t.Fatalf("Add failed %v", err) + t.Fatalf("Remove failed %v", err) } time.Sleep(2 * time.Second) @@ -371,7 +371,7 @@ func TestPeriodicDispatch_ForceRun_Tracked(t *testing.T) { // Add it. if added, err := p.Add(job); err != nil || !added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed %v %v", added, err) } // ForceRun the job @@ -403,7 +403,7 @@ func TestPeriodicDispatch_Run_DisallowOverlaps(t *testing.T) { // Add it. if added, err := p.Add(job); err != nil || !added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed %v %v", added, err) } time.Sleep(3 * time.Second) @@ -432,7 +432,7 @@ func TestPeriodicDispatch_Run_Multiple(t *testing.T) { // Add it. if added, err := p.Add(job); err != nil || !added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed %v %v", added, err) } time.Sleep(3 * time.Second) @@ -464,10 +464,10 @@ func TestPeriodicDispatch_Run_SameTime(t *testing.T) { // Add them. if added, err := p.Add(job); err != nil || !added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed %v %v", added, err) } if added, err := p.Add(job2); err != nil || !added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed %v %v", added, err) } if l := len(p.Tracked()); l != 2 { @@ -504,10 +504,10 @@ func TestPeriodicDispatch_Run_SameID_Different_Namespace(t *testing.T) { // Add them. if added, err := p.Add(job); err != nil || !added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed %v %v", added, err) } if added, err := p.Add(job2); err != nil || !added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed %v %v", added, err) } if l := len(p.Tracked()); l != 2 { @@ -570,11 +570,11 @@ func TestPeriodicDispatch_Complex(t *testing.T) { // Create a map of expected eval job ids. 
expected := map[string][]time.Time{ - job1.ID: []time.Time{same}, - job2.ID: []time.Time{same}, + job1.ID: {same}, + job2.ID: {same}, job3.ID: nil, - job4.ID: []time.Time{launch1, launch3}, - job5.ID: []time.Time{launch2}, + job4.ID: {launch1, launch3}, + job5.ID: {launch2}, job6.ID: nil, job7.ID: nil, job8.ID: nil, @@ -588,7 +588,7 @@ func TestPeriodicDispatch_Complex(t *testing.T) { for _, job := range jobs { if added, err := p.Add(job); err != nil || !added { - t.Fatalf("Add failed %v", added, err) + t.Fatalf("Add failed %v %v", added, err) } } diff --git a/nomad/plan_apply.go b/nomad/plan_apply.go index e82595bc5..83e50f8e7 100644 --- a/nomad/plan_apply.go +++ b/nomad/plan_apply.go @@ -405,7 +405,6 @@ func evaluateNodePlan(snap *state.StateSnapshot, plan *structs.Plan, nodeID stri // Determine the proposed allocation by first removing allocations // that are planned evictions and adding the new allocations. - proposed := existingAlloc var remove []*structs.Allocation if update := plan.NodeUpdate[nodeID]; len(update) > 0 { remove = append(remove, update...) @@ -415,7 +414,7 @@ func evaluateNodePlan(snap *state.StateSnapshot, plan *structs.Plan, nodeID stri remove = append(remove, alloc) } } - proposed = structs.RemoveAllocs(existingAlloc, remove) + proposed := structs.RemoveAllocs(existingAlloc, remove) proposed = append(proposed, plan.NodeAllocation[nodeID]...) // Check if these allocations fit diff --git a/nomad/plan_apply_pool_test.go b/nomad/plan_apply_pool_test.go index 0a6775c87..14812e985 100644 --- a/nomad/plan_apply_pool_test.go +++ b/nomad/plan_apply_pool_test.go @@ -17,7 +17,7 @@ func TestEvaluatePool(t *testing.T) { alloc := mock.Alloc() plan := &structs.Plan{ NodeAllocation: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{alloc}, + node.ID: {alloc}, }, } diff --git a/nomad/plan_apply_test.go b/nomad/plan_apply_test.go index 853fb00dd..1fca16e60 100644 --- a/nomad/plan_apply_test.go +++ b/nomad/plan_apply_test.go @@ -92,7 +92,7 @@ func TestPlanApply_applyPlan(t *testing.T) { s1.State().UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)) planRes := &structs.PlanResult{ NodeAllocation: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{alloc}, + node.ID: {alloc}, }, Deployment: dnew, DeploymentUpdates: updates, @@ -177,10 +177,10 @@ func TestPlanApply_applyPlan(t *testing.T) { s1.State().UpsertJobSummary(1500, mock.JobSummary(alloc2.JobID)) planRes = &structs.PlanResult{ NodeUpdate: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{allocEvict}, + node.ID: {allocEvict}, }, NodeAllocation: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{alloc2}, + node.ID: {alloc2}, }, } @@ -248,7 +248,7 @@ func TestPlanApply_EvalPlan_Simple(t *testing.T) { alloc := mock.Alloc() plan := &structs.Plan{ NodeAllocation: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{alloc}, + node.ID: {alloc}, }, Deployment: mock.Deployment(), DeploymentUpdates: []*structs.DeploymentStatusUpdate{ @@ -300,8 +300,8 @@ func TestPlanApply_EvalPlan_Partial(t *testing.T) { plan := &structs.Plan{ NodeAllocation: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{alloc}, - node2.ID: []*structs.Allocation{alloc2}, + node.ID: {alloc}, + node2.ID: {alloc2}, }, Deployment: d, } @@ -353,8 +353,8 @@ func TestPlanApply_EvalPlan_Partial_AllAtOnce(t *testing.T) { plan := &structs.Plan{ AllAtOnce: true, // Require all to make progress NodeAllocation: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{alloc}, - 
node2.ID: []*structs.Allocation{alloc2}, + node.ID: {alloc}, + node2.ID: {alloc2}, }, Deployment: mock.Deployment(), DeploymentUpdates: []*structs.DeploymentStatusUpdate{ @@ -398,7 +398,7 @@ func TestPlanApply_EvalNodePlan_Simple(t *testing.T) { alloc := mock.Alloc() plan := &structs.Plan{ NodeAllocation: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{alloc}, + node.ID: {alloc}, }, } @@ -425,7 +425,7 @@ func TestPlanApply_EvalNodePlan_NodeNotReady(t *testing.T) { alloc := mock.Alloc() plan := &structs.Plan{ NodeAllocation: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{alloc}, + node.ID: {alloc}, }, } @@ -452,7 +452,7 @@ func TestPlanApply_EvalNodePlan_NodeDrain(t *testing.T) { alloc := mock.Alloc() plan := &structs.Plan{ NodeAllocation: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{alloc}, + node.ID: {alloc}, }, } @@ -477,7 +477,7 @@ func TestPlanApply_EvalNodePlan_NodeNotExist(t *testing.T) { alloc := mock.Alloc() plan := &structs.Plan{ NodeAllocation: map[string][]*structs.Allocation{ - nodeID: []*structs.Allocation{alloc}, + nodeID: {alloc}, }, } @@ -512,7 +512,7 @@ func TestPlanApply_EvalNodePlan_NodeFull(t *testing.T) { snap, _ := state.Snapshot() plan := &structs.Plan{ NodeAllocation: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{alloc2}, + node.ID: {alloc2}, }, } @@ -542,7 +542,7 @@ func TestPlanApply_EvalNodePlan_UpdateExisting(t *testing.T) { plan := &structs.Plan{ NodeAllocation: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{alloc}, + node.ID: {alloc}, }, } @@ -576,10 +576,10 @@ func TestPlanApply_EvalNodePlan_NodeFull_Evict(t *testing.T) { alloc2 := mock.Alloc() plan := &structs.Plan{ NodeUpdate: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{allocEvict}, + node.ID: {allocEvict}, }, NodeAllocation: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{alloc2}, + node.ID: {alloc2}, }, } @@ -611,7 +611,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_AllocEvict(t *testing.T) { alloc2 := mock.Alloc() plan := &structs.Plan{ NodeAllocation: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{alloc2}, + node.ID: {alloc2}, }, } @@ -645,7 +645,7 @@ func TestPlanApply_EvalNodePlan_NodeDown_EvictOnly(t *testing.T) { allocEvict.DesiredStatus = structs.AllocDesiredStatusEvict plan := &structs.Plan{ NodeUpdate: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{allocEvict}, + node.ID: {allocEvict}, }, } diff --git a/nomad/serf.go b/nomad/serf.go index ab14bf293..ad6c58d0c 100644 --- a/nomad/serf.go +++ b/nomad/serf.go @@ -153,7 +153,7 @@ func (s *Server) maybeBootstrap() { for attempt := uint(0); attempt < maxPeerRetries; attempt++ { if err := s.connPool.RPC(s.config.Region, server.Addr, server.MajorVersion, "Status.Peers", req, &peers); err != nil { - nextRetry := time.Duration((1 << attempt) * peerRetryBase) + nextRetry := (1 << attempt) * peerRetryBase s.logger.Printf("[ERR] consul: Failed to confirm peer status for %s: %v. Retrying in "+ "%v...", server.Name, err, nextRetry.String()) time.Sleep(nextRetry) diff --git a/nomad/server.go b/nomad/server.go index d958c08b6..cfe247c26 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -924,7 +924,7 @@ func (s *Server) setupRaft() error { // we add support for node IDs. 
configuration := raft.Configuration{ Servers: []raft.Server{ - raft.Server{ + { ID: raft.ServerID(trans.LocalAddr()), Address: trans.LocalAddr(), }, @@ -1069,7 +1069,7 @@ func (s *Server) Regions() []string { defer s.peerLock.RUnlock() regions := make([]string, 0, len(s.peers)) - for region, _ := range s.peers { + for region := range s.peers { regions = append(regions, region) } sort.Strings(regions) @@ -1131,7 +1131,7 @@ func (s *Server) Stats() map[string]map[string]string { return strconv.FormatUint(v, 10) } stats := map[string]map[string]string{ - "nomad": map[string]string{ + "nomad": { "server": "true", "leader": fmt.Sprintf("%v", s.IsLeader()), "leader_addr": string(s.raft.Leader()), diff --git a/nomad/state/notify.go b/nomad/state/notify.go index 180482369..7a8a8d792 100644 --- a/nomad/state/notify.go +++ b/nomad/state/notify.go @@ -18,7 +18,7 @@ type NotifyGroup struct { func (n *NotifyGroup) Notify() { n.l.Lock() defer n.l.Unlock() - for ch, _ := range n.notify { + for ch := range n.notify { select { case ch <- struct{}{}: default: diff --git a/nomad/state/schema.go b/nomad/state/schema.go index 2df96679d..2effff87c 100644 --- a/nomad/state/schema.go +++ b/nomad/state/schema.go @@ -69,7 +69,7 @@ func indexTableSchema() *memdb.TableSchema { return &memdb.TableSchema{ Name: "index", Indexes: map[string]*memdb.IndexSchema{ - "id": &memdb.IndexSchema{ + "id": { Name: "id", AllowMissing: false, Unique: true, @@ -91,7 +91,7 @@ func nodeTableSchema() *memdb.TableSchema { // Primary index is used for node management // and simple direct lookup. ID is required to be // unique. - "id": &memdb.IndexSchema{ + "id": { Name: "id", AllowMissing: false, Unique: true, @@ -112,7 +112,7 @@ func jobTableSchema() *memdb.TableSchema { // Primary index is used for job management // and simple direct lookup. ID is required to be // unique within a namespace. 
- "id": &memdb.IndexSchema{ + "id": { Name: "id", AllowMissing: false, Unique: true, @@ -131,7 +131,7 @@ func jobTableSchema() *memdb.TableSchema { }, }, }, - "type": &memdb.IndexSchema{ + "type": { Name: "type", AllowMissing: false, Unique: false, @@ -140,7 +140,7 @@ func jobTableSchema() *memdb.TableSchema { Lowercase: false, }, }, - "gc": &memdb.IndexSchema{ + "gc": { Name: "gc", AllowMissing: false, Unique: false, @@ -148,7 +148,7 @@ func jobTableSchema() *memdb.TableSchema { Conditional: jobIsGCable, }, }, - "periodic": &memdb.IndexSchema{ + "periodic": { Name: "periodic", AllowMissing: false, Unique: false, @@ -165,7 +165,7 @@ func jobSummarySchema() *memdb.TableSchema { return &memdb.TableSchema{ Name: "job_summary", Indexes: map[string]*memdb.IndexSchema{ - "id": &memdb.IndexSchema{ + "id": { Name: "id", AllowMissing: false, Unique: true, @@ -194,7 +194,7 @@ func jobVersionSchema() *memdb.TableSchema { return &memdb.TableSchema{ Name: "job_version", Indexes: map[string]*memdb.IndexSchema{ - "id": &memdb.IndexSchema{ + "id": { Name: "id", AllowMissing: false, Unique: true, @@ -277,7 +277,7 @@ func deploymentSchema() *memdb.TableSchema { return &memdb.TableSchema{ Name: "deployment", Indexes: map[string]*memdb.IndexSchema{ - "id": &memdb.IndexSchema{ + "id": { Name: "id", AllowMissing: false, Unique: true, @@ -286,7 +286,7 @@ func deploymentSchema() *memdb.TableSchema { }, }, - "namespace": &memdb.IndexSchema{ + "namespace": { Name: "namespace", AllowMissing: false, Unique: false, @@ -296,7 +296,7 @@ func deploymentSchema() *memdb.TableSchema { }, // Job index is used to lookup deployments by job - "job": &memdb.IndexSchema{ + "job": { Name: "job", AllowMissing: false, Unique: false, @@ -328,7 +328,7 @@ func periodicLaunchTableSchema() *memdb.TableSchema { // Primary index is used for job management // and simple direct lookup. ID is required to be // unique. - "id": &memdb.IndexSchema{ + "id": { Name: "id", AllowMissing: false, Unique: true, @@ -359,7 +359,7 @@ func evalTableSchema() *memdb.TableSchema { Name: "evals", Indexes: map[string]*memdb.IndexSchema{ // Primary index is used for direct lookup. 
- "id": &memdb.IndexSchema{ + "id": { Name: "id", AllowMissing: false, Unique: true, @@ -368,7 +368,7 @@ func evalTableSchema() *memdb.TableSchema { }, }, - "namespace": &memdb.IndexSchema{ + "namespace": { Name: "namespace", AllowMissing: false, Unique: false, @@ -378,7 +378,7 @@ func evalTableSchema() *memdb.TableSchema { }, // Job index is used to lookup allocations by job - "job": &memdb.IndexSchema{ + "job": { Name: "job", AllowMissing: false, Unique: false, @@ -412,7 +412,7 @@ func allocTableSchema() *memdb.TableSchema { Name: "allocs", Indexes: map[string]*memdb.IndexSchema{ // Primary index is a UUID - "id": &memdb.IndexSchema{ + "id": { Name: "id", AllowMissing: false, Unique: true, @@ -421,7 +421,7 @@ func allocTableSchema() *memdb.TableSchema { }, }, - "namespace": &memdb.IndexSchema{ + "namespace": { Name: "namespace", AllowMissing: false, Unique: false, @@ -431,7 +431,7 @@ func allocTableSchema() *memdb.TableSchema { }, // Node index is used to lookup allocations by node - "node": &memdb.IndexSchema{ + "node": { Name: "node", AllowMissing: true, // Missing is allow for failed allocations Unique: false, @@ -460,7 +460,7 @@ func allocTableSchema() *memdb.TableSchema { }, // Job index is used to lookup allocations by job - "job": &memdb.IndexSchema{ + "job": { Name: "job", AllowMissing: false, Unique: false, @@ -479,7 +479,7 @@ func allocTableSchema() *memdb.TableSchema { }, // Eval index is used to lookup allocations by eval - "eval": &memdb.IndexSchema{ + "eval": { Name: "eval", AllowMissing: false, Unique: false, @@ -489,7 +489,7 @@ func allocTableSchema() *memdb.TableSchema { }, // Deployment index is used to lookup allocations by deployment - "deployment": &memdb.IndexSchema{ + "deployment": { Name: "deployment", AllowMissing: true, Unique: false, @@ -509,7 +509,7 @@ func vaultAccessorTableSchema() *memdb.TableSchema { Name: "vault_accessors", Indexes: map[string]*memdb.IndexSchema{ // The primary index is the accessor id - "id": &memdb.IndexSchema{ + "id": { Name: "id", AllowMissing: false, Unique: true, @@ -518,7 +518,7 @@ func vaultAccessorTableSchema() *memdb.TableSchema { }, }, - "alloc_id": &memdb.IndexSchema{ + "alloc_id": { Name: "alloc_id", AllowMissing: false, Unique: false, @@ -527,7 +527,7 @@ func vaultAccessorTableSchema() *memdb.TableSchema { }, }, - "node_id": &memdb.IndexSchema{ + "node_id": { Name: "node_id", AllowMissing: false, Unique: false, @@ -545,7 +545,7 @@ func aclPolicyTableSchema() *memdb.TableSchema { return &memdb.TableSchema{ Name: "acl_policy", Indexes: map[string]*memdb.IndexSchema{ - "id": &memdb.IndexSchema{ + "id": { Name: "id", AllowMissing: false, Unique: true, @@ -563,7 +563,7 @@ func aclTokenTableSchema() *memdb.TableSchema { return &memdb.TableSchema{ Name: "acl_token", Indexes: map[string]*memdb.IndexSchema{ - "id": &memdb.IndexSchema{ + "id": { Name: "id", AllowMissing: false, Unique: true, @@ -571,7 +571,7 @@ func aclTokenTableSchema() *memdb.TableSchema { Field: "AccessorID", }, }, - "secret": &memdb.IndexSchema{ + "secret": { Name: "secret", AllowMissing: false, Unique: true, @@ -579,7 +579,7 @@ func aclTokenTableSchema() *memdb.TableSchema { Field: "SecretID", }, }, - "global": &memdb.IndexSchema{ + "global": { Name: "global", AllowMissing: false, Unique: false, diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index f01df88d5..9056841d6 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -673,7 +673,7 @@ func (s *StateStore) upsertJobImpl(index uint64, job *structs.Job, keepVersion b if 
exists, err := s.namespaceExists(txn, job.Namespace); err != nil { return err } else if !exists { - return fmt.Errorf("job %q is in non-existant namespace %q", job.ID, job.Namespace) + return fmt.Errorf("job %q is in non-existent namespace %q", job.ID, job.Namespace) } // Check if the job already exists diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index f2e06ece5..b5657aaaa 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -1167,7 +1167,7 @@ func TestStateStore_UpsertJob_BadNamespace(t *testing.T) { job.Namespace = "foo" err := state.UpsertJob(1000, job) - assert.Contains(err.Error(), "non-existant namespace") + assert.Contains(err.Error(), "non-existent namespace") ws := memdb.NewWatchSet() out, err := state.JobByID(ws, job.Namespace, job.ID) @@ -2265,7 +2265,7 @@ func TestStateStore_RestoreJobSummary(t *testing.T) { JobID: job.ID, Namespace: job.Namespace, Summary: map[string]structs.TaskGroupSummary{ - "web": structs.TaskGroupSummary{ + "web": { Starting: 10, }, }, @@ -2316,7 +2316,7 @@ func TestStateStore_Indexes(t *testing.T) { } expect := []*IndexEntry{ - &IndexEntry{"nodes", 1000}, + {"nodes", 1000}, } if !reflect.DeepEqual(expect, out) { @@ -3066,7 +3066,7 @@ func TestStateStore_UpdateAllocsFromClient(t *testing.T) { } // Create the delta updates - ts := map[string]*structs.TaskState{"web": &structs.TaskState{State: structs.TaskStateRunning}} + ts := map[string]*structs.TaskState{"web": {State: structs.TaskStateRunning}} update := &structs.Allocation{ ID: alloc.ID, ClientStatus: structs.AllocClientStatusComplete, @@ -3154,7 +3154,7 @@ func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { } // Create the delta updates - ts := map[string]*structs.TaskState{"web": &structs.TaskState{State: structs.TaskStatePending}} + ts := map[string]*structs.TaskState{"web": {State: structs.TaskStatePending}} update := &structs.Allocation{ ID: alloc1.ID, ClientStatus: structs.AllocClientStatusFailed, @@ -3253,7 +3253,7 @@ func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { } // Create the delta updates - ts := map[string]*structs.TaskState{"web": &structs.TaskState{State: structs.TaskStatePending}} + ts := map[string]*structs.TaskState{"web": {State: structs.TaskStatePending}} update := &structs.Allocation{ ID: alloc.ID, ClientStatus: structs.AllocClientStatusRunning, @@ -3293,7 +3293,7 @@ func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { JobID: alloc.JobID, Namespace: alloc.Namespace, Summary: map[string]structs.TaskGroupSummary{ - "web": structs.TaskGroupSummary{ + "web": { Starting: 1, }, }, @@ -3766,7 +3766,7 @@ func TestStateStore_JobSummary(t *testing.T) { JobID: job.ID, Namespace: job.Namespace, Summary: map[string]structs.TaskGroupSummary{ - "web": structs.TaskGroupSummary{ + "web": { Running: 1, }, }, @@ -3820,7 +3820,7 @@ func TestStateStore_JobSummary(t *testing.T) { JobID: job.ID, Namespace: job.Namespace, Summary: map[string]structs.TaskGroupSummary{ - "web": structs.TaskGroupSummary{}, + "web": {}, }, Children: new(structs.JobChildrenSummary), CreateIndex: 1000, @@ -3903,10 +3903,10 @@ func TestStateStore_ReconcileJobSummary(t *testing.T) { JobID: alloc.Job.ID, Namespace: alloc.Namespace, Summary: map[string]structs.TaskGroupSummary{ - "web": structs.TaskGroupSummary{ + "web": { Running: 1, }, - "db": structs.TaskGroupSummary{ + "db": { Starting: 1, Running: 1, Failed: 1, @@ -3957,7 +3957,7 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { JobID: 
alloc1.JobID, Namespace: alloc1.Namespace, Summary: map[string]structs.TaskGroupSummary{ - "web": structs.TaskGroupSummary{}, + "web": {}, }, Children: new(structs.JobChildrenSummary), CreateIndex: 500, @@ -4188,6 +4188,10 @@ func TestStateStore_AllocsForRegisteredJob(t *testing.T) { } out1, err := state.AllocsByJob(ws, job1.Namespace, job1.ID, false) + if err != nil { + t.Fatalf("bad: %v", err) + } + expected = len(allocs1) if len(out1) != expected { t.Fatalf("expected: %v, actual: %v", expected, len(out1)) @@ -5247,11 +5251,11 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion d.JobID = j.ID d.TaskGroups = map[string]*structs.DeploymentState{ - "web": &structs.DeploymentState{ + "web": { DesiredTotal: 10, DesiredCanaries: 1, }, - "foo": &structs.DeploymentState{ + "foo": { DesiredTotal: 10, DesiredCanaries: 1, }, @@ -5343,11 +5347,11 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { d := mock.Deployment() d.JobID = j.ID d.TaskGroups = map[string]*structs.DeploymentState{ - "web": &structs.DeploymentState{ + "web": { DesiredTotal: 10, DesiredCanaries: 1, }, - "foo": &structs.DeploymentState{ + "foo": { DesiredTotal: 10, DesiredCanaries: 1, }, diff --git a/nomad/structs/funcs_test.go b/nomad/structs/funcs_test.go index 7ae921fc0..2e057ac15 100644 --- a/nomad/structs/funcs_test.go +++ b/nomad/structs/funcs_test.go @@ -11,10 +11,10 @@ import ( func TestRemoveAllocs(t *testing.T) { l := []*Allocation{ - &Allocation{ID: "foo"}, - &Allocation{ID: "bar"}, - &Allocation{ID: "baz"}, - &Allocation{ID: "zip"}, + {ID: "foo"}, + {ID: "bar"}, + {ID: "baz"}, + {ID: "zip"}, } out := RemoveAllocs(l, []*Allocation{l[1], l[3]}) @@ -28,25 +28,25 @@ func TestRemoveAllocs(t *testing.T) { func TestFilterTerminalAllocs(t *testing.T) { l := []*Allocation{ - &Allocation{ + { ID: "bar", Name: "myname1", DesiredStatus: AllocDesiredStatusEvict, }, - &Allocation{ID: "baz", DesiredStatus: AllocDesiredStatusStop}, - &Allocation{ + {ID: "baz", DesiredStatus: AllocDesiredStatusStop}, + { ID: "foo", DesiredStatus: AllocDesiredStatusRun, ClientStatus: AllocClientStatusPending, }, - &Allocation{ + { ID: "bam", Name: "myname", DesiredStatus: AllocDesiredStatusRun, ClientStatus: AllocClientStatusComplete, CreateIndex: 5, }, - &Allocation{ + { ID: "lol", Name: "myname", DesiredStatus: AllocDesiredStatusRun, @@ -80,7 +80,7 @@ func TestAllocsFit_PortsOvercommitted(t *testing.T) { n := &Node{ Resources: &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", CIDR: "10.0.0.0/8", MBits: 100, @@ -99,9 +99,9 @@ func TestAllocsFit_PortsOvercommitted(t *testing.T) { }, }, TaskResources: map[string]*Resources{ - "web": &Resources{ + "web": { Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", IP: "10.0.0.1", MBits: 50, @@ -139,7 +139,7 @@ func TestAllocsFit(t *testing.T) { DiskMB: 10000, IOPS: 100, Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", CIDR: "10.0.0.0/8", MBits: 100, @@ -152,7 +152,7 @@ func TestAllocsFit(t *testing.T) { DiskMB: 5000, IOPS: 50, Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", IP: "10.0.0.1", MBits: 50, @@ -169,7 +169,7 @@ func TestAllocsFit(t *testing.T) { DiskMB: 5000, IOPS: 50, Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", IP: "10.0.0.1", MBits: 50, diff --git a/nomad/structs/network_test.go b/nomad/structs/network_test.go index 7752384ff..2f641d1ea 100644 --- 
a/nomad/structs/network_test.go +++ b/nomad/structs/network_test.go @@ -28,7 +28,7 @@ func TestNetworkIndex_Overcommitted(t *testing.T) { n := &Node{ Resources: &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", CIDR: "192.168.0.100/32", MBits: 1000, @@ -53,7 +53,7 @@ func TestNetworkIndex_SetNode(t *testing.T) { n := &Node{ Resources: &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", CIDR: "192.168.0.100/32", MBits: 1000, @@ -62,7 +62,7 @@ func TestNetworkIndex_SetNode(t *testing.T) { }, Reserved: &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", IP: "192.168.0.100", ReservedPorts: []Port{{"ssh", 22}}, @@ -93,11 +93,11 @@ func TestNetworkIndex_SetNode(t *testing.T) { func TestNetworkIndex_AddAllocs(t *testing.T) { idx := NewNetworkIndex() allocs := []*Allocation{ - &Allocation{ + { TaskResources: map[string]*Resources{ - "web": &Resources{ + "web": { Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", IP: "192.168.0.100", MBits: 20, @@ -107,11 +107,11 @@ func TestNetworkIndex_AddAllocs(t *testing.T) { }, }, }, - &Allocation{ + { TaskResources: map[string]*Resources{ - "api": &Resources{ + "api": { Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", IP: "192.168.0.100", MBits: 50, @@ -177,7 +177,7 @@ func TestNetworkIndex_yieldIP(t *testing.T) { n := &Node{ Resources: &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", CIDR: "192.168.0.100/30", MBits: 1000, @@ -186,7 +186,7 @@ func TestNetworkIndex_yieldIP(t *testing.T) { }, Reserved: &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", IP: "192.168.0.100", ReservedPorts: []Port{{"ssh", 22}}, @@ -215,7 +215,7 @@ func TestNetworkIndex_AssignNetwork(t *testing.T) { n := &Node{ Resources: &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", CIDR: "192.168.0.100/30", MBits: 1000, @@ -224,7 +224,7 @@ func TestNetworkIndex_AssignNetwork(t *testing.T) { }, Reserved: &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", IP: "192.168.0.100", ReservedPorts: []Port{{"ssh", 22}}, @@ -236,11 +236,11 @@ func TestNetworkIndex_AssignNetwork(t *testing.T) { idx.SetNode(n) allocs := []*Allocation{ - &Allocation{ + { TaskResources: map[string]*Resources{ - "web": &Resources{ + "web": { Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", IP: "192.168.0.100", MBits: 20, @@ -250,11 +250,11 @@ func TestNetworkIndex_AssignNetwork(t *testing.T) { }, }, }, - &Allocation{ + { TaskResources: map[string]*Resources{ - "api": &Resources{ + "api": { Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", IP: "192.168.0.100", MBits: 50, @@ -352,7 +352,7 @@ func TestNetworkIndex_AssignNetwork_Dynamic_Contention(t *testing.T) { n := &Node{ Resources: &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", CIDR: "192.168.0.100/32", MBits: 1000, @@ -361,7 +361,7 @@ func TestNetworkIndex_AssignNetwork_Dynamic_Contention(t *testing.T) { }, Reserved: &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { Device: "eth0", IP: "192.168.0.100", MBits: 1, diff --git a/nomad/structs/node_class_test.go b/nomad/structs/node_class_test.go index 56a520197..4db3f7558 100644 --- a/nomad/structs/node_class_test.go +++ b/nomad/structs/node_class_test.go @@ -22,7 +22,7 @@ func testNode() *Node { DiskMB: 100 * 1024, IOPS: 150, Networks: []*NetworkResource{ - &NetworkResource{ + { 
Device: "eth0", CIDR: "192.168.0.100/32", MBits: 1000, diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 1ca19e35d..f6df8df86 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -3424,7 +3424,7 @@ func validateServices(t *Task) error { if t.Resources != nil { for _, network := range t.Resources.Networks { ports := network.PortLabels() - for portLabel, _ := range ports { + for portLabel := range ports { portLabels[portLabel] = struct{}{} } } diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index d4858cc78..cfcafce9f 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -71,7 +71,7 @@ func TestJob_Validate(t *testing.T) { Priority: 50, Datacenters: []string{"dc1"}, TaskGroups: []*TaskGroup{ - &TaskGroup{ + { Name: "web", RestartPolicy: &RestartPolicy{ Interval: 5 * time.Minute, @@ -79,7 +79,7 @@ func TestJob_Validate(t *testing.T) { Attempts: 10, }, }, - &TaskGroup{ + { Name: "web", RestartPolicy: &RestartPolicy{ Interval: 5 * time.Minute, @@ -87,7 +87,7 @@ func TestJob_Validate(t *testing.T) { Attempts: 10, }, }, - &TaskGroup{ + { RestartPolicy: &RestartPolicy{ Interval: 5 * time.Minute, Delay: 10 * time.Second, @@ -538,7 +538,7 @@ func testJob() *Job { AllAtOnce: false, Datacenters: []string{"dc1"}, Constraints: []*Constraint{ - &Constraint{ + { LTarget: "$attr.kernel.name", RTarget: "linux", Operand: "=", @@ -548,7 +548,7 @@ func testJob() *Job { Enabled: false, }, TaskGroups: []*TaskGroup{ - &TaskGroup{ + { Name: "web", Count: 10, EphemeralDisk: DefaultEphemeralDisk(), @@ -559,7 +559,7 @@ func testJob() *Job { Delay: 1 * time.Minute, }, Tasks: []*Task{ - &Task{ + { Name: "web", Driver: "exec", Config: map[string]interface{}{ @@ -583,7 +583,7 @@ func testJob() *Job { CPU: 500, MemoryMB: 256, Networks: []*NetworkResource{ - &NetworkResource{ + { MBits: 50, DynamicPorts: []Port{{Label: "http"}}, }, @@ -679,26 +679,26 @@ func TestJob_VaultPolicies(t *testing.T) { } j1 := &Job{ TaskGroups: []*TaskGroup{ - &TaskGroup{ + { Name: "foo", Tasks: []*Task{ - &Task{ + { Name: "t1", }, - &Task{ + { Name: "t2", Vault: vj1, }, }, }, - &TaskGroup{ + { Name: "bar", Tasks: []*Task{ - &Task{ + { Name: "t3", Vault: vj2, }, - &Task{ + { Name: "t4", Vault: vj3, }, @@ -708,10 +708,10 @@ func TestJob_VaultPolicies(t *testing.T) { } e1 := map[string]map[string]*Vault{ - "foo": map[string]*Vault{ + "foo": { "t2": vj1, }, - "bar": map[string]*Vault{ + "bar": { "t3": vj2, "t4": vj3, }, @@ -765,28 +765,28 @@ func TestJob_RequiredSignals(t *testing.T) { } j1 := &Job{ TaskGroups: []*TaskGroup{ - &TaskGroup{ + { Name: "foo", Tasks: []*Task{ - &Task{ + { Name: "t1", }, - &Task{ + { Name: "t2", Vault: vj2, Templates: []*Template{tj2}, }, }, }, - &TaskGroup{ + { Name: "bar", Tasks: []*Task{ - &Task{ + { Name: "t3", Vault: vj1, Templates: []*Template{tj1}, }, - &Task{ + { Name: "t4", Vault: vj2, }, @@ -796,11 +796,11 @@ func TestJob_RequiredSignals(t *testing.T) { } e1 := map[string]map[string][]string{ - "foo": map[string][]string{ - "t2": []string{"SIGUSR1", "SIGUSR2"}, + "foo": { + "t2": {"SIGUSR1", "SIGUSR2"}, }, - "bar": map[string][]string{ - "t4": []string{"SIGUSR1"}, + "bar": { + "t4": {"SIGUSR1"}, }, } @@ -851,21 +851,21 @@ func TestTaskGroup_Validate(t *testing.T) { tg = &TaskGroup{ Tasks: []*Task{ - &Task{ + { Name: "task-a", Resources: &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { ReservedPorts: []Port{{Label: "foo", Value: 123}}, }, }, }, }, - &Task{ + { Name: "task-b", Resources: &Resources{ 
Networks: []*NetworkResource{ - &NetworkResource{ + { ReservedPorts: []Port{{Label: "foo", Value: 123}}, }, }, @@ -881,11 +881,11 @@ func TestTaskGroup_Validate(t *testing.T) { tg = &TaskGroup{ Tasks: []*Task{ - &Task{ + { Name: "task-a", Resources: &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { ReservedPorts: []Port{ {Label: "foo", Value: 123}, {Label: "bar", Value: 123}, @@ -906,9 +906,9 @@ func TestTaskGroup_Validate(t *testing.T) { Name: "web", Count: 1, Tasks: []*Task{ - &Task{Name: "web", Leader: true}, - &Task{Name: "web", Leader: true}, - &Task{}, + {Name: "web", Leader: true}, + {Name: "web", Leader: true}, + {}, }, RestartPolicy: &RestartPolicy{ Interval: 5 * time.Minute, @@ -1059,14 +1059,14 @@ func TestTask_Validate_Services(t *testing.T) { LogConfig: DefaultLogConfig(), } task1.Resources.Networks = []*NetworkResource{ - &NetworkResource{ + { MBits: 10, DynamicPorts: []Port{ - Port{ + { Label: "a", Value: 1000, }, - Port{ + { Label: "b", Value: 2000, }, @@ -1458,9 +1458,9 @@ func TestUpdateStrategy_Validate(t *testing.T) { func TestResource_NetIndex(t *testing.T) { r := &Resources{ Networks: []*NetworkResource{ - &NetworkResource{Device: "eth0"}, - &NetworkResource{Device: "lo0"}, - &NetworkResource{Device: ""}, + {Device: "eth0"}, + {Device: "lo0"}, + {Device: ""}, }, } if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 { @@ -1509,7 +1509,7 @@ func TestResource_Add(t *testing.T) { DiskMB: 10000, IOPS: 100, Networks: []*NetworkResource{ - &NetworkResource{ + { CIDR: "10.0.0.0/8", MBits: 100, ReservedPorts: []Port{{"ssh", 22}}, @@ -1522,7 +1522,7 @@ func TestResource_Add(t *testing.T) { DiskMB: 5000, IOPS: 50, Networks: []*NetworkResource{ - &NetworkResource{ + { IP: "10.0.0.1", MBits: 50, ReservedPorts: []Port{{"web", 80}}, @@ -1541,7 +1541,7 @@ func TestResource_Add(t *testing.T) { DiskMB: 15000, IOPS: 150, Networks: []*NetworkResource{ - &NetworkResource{ + { CIDR: "10.0.0.0/8", MBits: 150, ReservedPorts: []Port{{"ssh", 22}, {"web", 80}}, @@ -1558,7 +1558,7 @@ func TestResource_Add_Network(t *testing.T) { r1 := &Resources{} r2 := &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { MBits: 50, DynamicPorts: []Port{{"http", 0}, {"https", 0}}, }, @@ -1566,7 +1566,7 @@ func TestResource_Add_Network(t *testing.T) { } r3 := &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { MBits: 25, DynamicPorts: []Port{{"admin", 0}}, }, @@ -1584,7 +1584,7 @@ func TestResource_Add_Network(t *testing.T) { expect := &Resources{ Networks: []*NetworkResource{ - &NetworkResource{ + { MBits: 75, DynamicPorts: []Port{{"http", 0}, {"https", 0}, {"admin", 0}}, }, @@ -1798,7 +1798,7 @@ func TestJob_ExpandServiceNames(t *testing.T) { j := &Job{ Name: "my-job", TaskGroups: []*TaskGroup{ - &TaskGroup{ + { Name: "web", Tasks: []*Task{ { @@ -1814,7 +1814,7 @@ func TestJob_ExpandServiceNames(t *testing.T) { }, }, }, - &TaskGroup{ + { Name: "admin", Tasks: []*Task{ { @@ -1890,7 +1890,7 @@ func TestPeriodicConfig_ValidCron(t *testing.T) { func TestPeriodicConfig_NextCron(t *testing.T) { from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC) specs := []string{"0 0 29 2 * 1980", "*/5 * * * *"} - expected := []time.Time{time.Time{}, time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC)} + expected := []time.Time{{}, time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC)} for i, spec := range specs { p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} p.Canonicalize() diff --git a/nomad/system_endpoint_test.go 
b/nomad/system_endpoint_test.go index 465df32a2..bca47357c 100644 --- a/nomad/system_endpoint_test.go +++ b/nomad/system_endpoint_test.go @@ -108,7 +108,7 @@ func TestSystemEndpoint_ReconcileSummaries(t *testing.T) { JobID: job.ID, Namespace: job.Namespace, Summary: map[string]structs.TaskGroupSummary{ - "web": structs.TaskGroupSummary{ + "web": { Queued: 10, }, }, diff --git a/nomad/vault.go b/nomad/vault.go index 28b91111d..4b4fb5556 100644 --- a/nomad/vault.go +++ b/nomad/vault.go @@ -59,9 +59,6 @@ const ( // vaultTokenLookupPath is the path used to lookup a token vaultTokenLookupPath = "auth/token/lookup" - // vaultTokenLookupSelfPath is the path used to lookup self token - vaultTokenLookupSelfPath = "auth/token/lookup-self" - // vaultTokenRevokePath is the path used to revoke a token vaultTokenRevokePath = "auth/token/revoke-accessor" @@ -91,11 +88,6 @@ var ( // the capabilities. vaultTokenLookupCapability = []string{"update", "root"} - // vaultTokenLookupSelfCapability is the expected capability Nomad's - // Vault token should have on the path. The token must have at least one of - // the capabilities. - vaultTokenLookupSelfCapability = []string{"update", "root"} - // vaultTokenRevokeCapability is the expected capability Nomad's // Vault token should have on the path. The token must have at least one of // the capabilities. diff --git a/nomad/vault_test.go b/nomad/vault_test.go index cb69caf5a..c34995b35 100644 --- a/nomad/vault_test.go +++ b/nomad/vault_test.go @@ -1140,8 +1140,8 @@ func TestVaultClient_RevokeTokens_Root(t *testing.T) { // Create two VaultAccessors vas := []*structs.VaultAccessor{ - &structs.VaultAccessor{Accessor: t1.Auth.Accessor}, - &structs.VaultAccessor{Accessor: t2.Auth.Accessor}, + {Accessor: t1.Auth.Accessor}, + {Accessor: t2.Auth.Accessor}, } // Issue a token revocation @@ -1208,8 +1208,8 @@ func TestVaultClient_RevokeTokens_Role(t *testing.T) { // Create two VaultAccessors vas := []*structs.VaultAccessor{ - &structs.VaultAccessor{Accessor: t1.Auth.Accessor}, - &structs.VaultAccessor{Accessor: t2.Auth.Accessor}, + {Accessor: t1.Auth.Accessor}, + {Accessor: t2.Auth.Accessor}, } // Issue a token revocation diff --git a/nomad/worker_test.go b/nomad/worker_test.go index 567a31321..938e25916 100644 --- a/nomad/worker_test.go +++ b/nomad/worker_test.go @@ -68,7 +68,7 @@ func TestWorker_dequeueEvaluation(t *testing.T) { t.Fatalf("should get token") } if waitIndex != eval1.ModifyIndex { - t.Fatalf("bad wait index; got %d; want %d", eval1.ModifyIndex) + t.Fatalf("bad wait index; got %d; want %d", waitIndex, eval1.ModifyIndex) } // Ensure we get a sane eval @@ -113,7 +113,7 @@ func TestWorker_dequeueEvaluation_SerialJobs(t *testing.T) { t.Fatalf("should get token") } if waitIndex != eval1.ModifyIndex { - t.Fatalf("bad wait index; got %d; want %d", eval1.ModifyIndex) + t.Fatalf("bad wait index; got %d; want %d", waitIndex, eval1.ModifyIndex) } // Ensure we get a sane eval @@ -185,7 +185,7 @@ func TestWorker_dequeueEvaluation_paused(t *testing.T) { t.Fatalf("should get token") } if waitIndex != eval1.ModifyIndex { - t.Fatalf("bad wait index; got %d; want %d", eval1.ModifyIndex) + t.Fatalf("bad wait index; got %d; want %d", waitIndex, eval1.ModifyIndex) } // Ensure we get a sane eval @@ -356,7 +356,7 @@ func TestWorker_SubmitPlan(t *testing.T) { plan := &structs.Plan{ EvalID: eval1.ID, NodeAllocation: map[string][]*structs.Allocation{ - node.ID: []*structs.Allocation{alloc}, + node.ID: {alloc}, }, } @@ -416,7 +416,7 @@ func TestWorker_SubmitPlan_MissingNodeRefresh(t 
*testing.T) { plan := &structs.Plan{ EvalID: eval1.ID, NodeAllocation: map[string][]*structs.Allocation{ - node2.ID: []*structs.Allocation{alloc}, + node2.ID: {alloc}, }, } diff --git a/scheduler/annotate_test.go b/scheduler/annotate_test.go index 0836ff3e8..54e90d7c5 100644 --- a/scheduler/annotate_test.go +++ b/scheduler/annotate_test.go @@ -10,7 +10,7 @@ import ( func TestAnnotateTaskGroup_Updates(t *testing.T) { annotations := &structs.PlanAnnotations{ DesiredTGUpdates: map[string]*structs.DesiredUpdates{ - "foo": &structs.DesiredUpdates{ + "foo": { Ignore: 1, Place: 2, Migrate: 3, diff --git a/scheduler/context_test.go b/scheduler/context_test.go index a74c69416..fb86fec7f 100644 --- a/scheduler/context_test.go +++ b/scheduler/context_test.go @@ -30,7 +30,7 @@ func testContext(t testing.TB) (*state.StateStore, *EvalContext) { func TestEvalContext_ProposedAlloc(t *testing.T) { state, ctx := testContext(t) nodes := []*RankedNode{ - &RankedNode{ + { Node: &structs.Node{ // Perfect fit ID: structs.GenerateUUID(), @@ -40,7 +40,7 @@ func TestEvalContext_ProposedAlloc(t *testing.T) { }, }, }, - &RankedNode{ + { Node: &structs.Node{ // Perfect fit ID: structs.GenerateUUID(), @@ -94,7 +94,7 @@ func TestEvalContext_ProposedAlloc(t *testing.T) { // Add a planned placement to node1 plan.NodeAllocation[nodes[1].Node.ID] = []*structs.Allocation{ - &structs.Allocation{ + { Resources: &structs.Resources{ CPU: 1024, MemoryMB: 1024, diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index 23f889103..e98531738 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -91,8 +91,8 @@ func TestDriverChecker(t *testing.T) { nodes[3].Attributes["driver.foo"] = "False" drivers := map[string]struct{}{ - "exec": struct{}{}, - "foo": struct{}{}, + "exec": {}, + "foo": {}, } checker := NewDriverChecker(ctx, drivers) cases := []struct { @@ -138,17 +138,17 @@ func TestConstraintChecker(t *testing.T) { nodes[2].NodeClass = "large" constraints := []*structs.Constraint{ - &structs.Constraint{ + { Operand: "=", LTarget: "${node.datacenter}", RTarget: "dc1", }, - &structs.Constraint{ + { Operand: "is", LTarget: "${attr.kernel.name}", RTarget: "linux", }, - &structs.Constraint{ + { Operand: "is", LTarget: "${node.class}", RTarget: "large", @@ -461,7 +461,7 @@ func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) { // job unsatisfiable on all nodes but node3 plan := ctx.Plan() plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -470,7 +470,7 @@ func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) { }, // Should be ignored as it is a different job. - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: "ignore 2", @@ -479,7 +479,7 @@ func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) { }, } plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: job.ID, @@ -488,7 +488,7 @@ func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) { }, // Should be ignored as it is a different job. 
- &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: "ignore 2", @@ -535,7 +535,7 @@ func TestDistinctHostsIterator_JobDistinctHosts_InfeasibleCount(t *testing.T) { // job unsatisfiable for tg3 plan := ctx.Plan() plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -543,7 +543,7 @@ func TestDistinctHostsIterator_JobDistinctHosts_InfeasibleCount(t *testing.T) { }, } plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: job.ID, @@ -582,7 +582,7 @@ func TestDistinctHostsIterator_TaskGroupDistinctHosts(t *testing.T) { // Add a planned alloc to node1. plan := ctx.Plan() plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: "foo", @@ -592,7 +592,7 @@ func TestDistinctHostsIterator_TaskGroupDistinctHosts(t *testing.T) { // Add a planned alloc to node2 with the same task group name but a // different job. plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: "bar", @@ -673,7 +673,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { plan := ctx.Plan() alloc1ID := structs.GenerateUUID() plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -683,7 +683,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { }, // Should be ignored as it is a different job. - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: "ignore 2", @@ -693,7 +693,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { }, } plan.NodeAllocation[nodes[2].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: job.ID, @@ -703,7 +703,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { }, // Should be ignored as it is a different job. - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: "ignore 2", @@ -716,7 +716,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { // Put an allocation on Node 5 but make it stopped in the plan stoppingAllocID := structs.GenerateUUID() plan.NodeUpdate[nodes[4].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: job.ID, @@ -729,7 +729,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { upserting := []*structs.Allocation{ // Have one of the allocations exist in both the plan and the state // store. This resembles an allocation update - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -739,7 +739,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { NodeID: nodes[0].ID, }, - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -750,7 +750,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { }, // Should be ignored as it is a different job. 
- &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: "ignore 2", @@ -759,7 +759,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { EvalID: structs.GenerateUUID(), NodeID: nodes[1].ID, }, - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: job.ID, @@ -770,7 +770,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { }, // Should be ignored as it is a different job. - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: "ignore 2", @@ -779,7 +779,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { EvalID: structs.GenerateUUID(), NodeID: nodes[3].ID, }, - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: job.ID, @@ -852,7 +852,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { plan := ctx.Plan() alloc1ID := structs.GenerateUUID() plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -861,7 +861,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { NodeID: nodes[0].ID, }, - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: job.ID, @@ -871,7 +871,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { }, // Should be ignored as it is a different job. - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: "ignore 2", @@ -881,7 +881,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { }, } plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -890,7 +890,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { NodeID: nodes[1].ID, }, - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: job.ID, @@ -900,7 +900,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { }, // Should be ignored as it is a different job. - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: "ignore 2", @@ -910,7 +910,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { }, } plan.NodeAllocation[nodes[2].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -920,7 +920,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { }, // Should be ignored as it is a different job. - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: "ignore 2", @@ -933,7 +933,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { // Put an allocation on Node 3 but make it stopped in the plan stoppingAllocID := structs.GenerateUUID() plan.NodeUpdate[nodes[2].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: job.ID, @@ -946,7 +946,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { upserting := []*structs.Allocation{ // Have one of the allocations exist in both the plan and the state // store. 
This resembles an allocation update - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -956,7 +956,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { NodeID: nodes[0].ID, }, - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -966,7 +966,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { NodeID: nodes[1].ID, }, - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: job.ID, @@ -977,7 +977,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { }, // Should be ignored as it is a different job. - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: "ignore 2", @@ -986,7 +986,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { EvalID: structs.GenerateUUID(), NodeID: nodes[1].ID, }, - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: "ignore 2", @@ -1048,7 +1048,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testin plan := ctx.Plan() plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -1060,7 +1060,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testin stoppingAllocID := structs.GenerateUUID() plan.NodeUpdate[nodes[0].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -1071,7 +1071,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testin } upserting := []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -1138,7 +1138,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) { // job unsatisfiable for tg3. plan := ctx.Plan() plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -1148,7 +1148,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) { }, } upserting := []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: job.ID, @@ -1216,7 +1216,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testin // make the job unsatisfiable for tg3. 
plan := ctx.Plan() plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -1224,7 +1224,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testin ID: structs.GenerateUUID(), NodeID: nodes[0].ID, }, - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: job.ID, @@ -1234,7 +1234,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testin }, } upserting := []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -1243,7 +1243,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testin EvalID: structs.GenerateUUID(), NodeID: nodes[1].ID, }, - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg2.Name, JobID: job.ID, @@ -1313,7 +1313,7 @@ func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) { // existing in the plan and the state store. plan := ctx.Plan() plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -1326,7 +1326,7 @@ func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) { // Put an allocation on Node 3 but make it stopped in the plan stoppingAllocID := structs.GenerateUUID() plan.NodeUpdate[nodes[2].ID] = []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -1337,7 +1337,7 @@ func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) { } upserting := []*structs.Allocation{ - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, @@ -1348,7 +1348,7 @@ func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) { }, // Should be ignored as it is a different job. 
- &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: "ignore 2", @@ -1358,7 +1358,7 @@ func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) { NodeID: nodes[2].ID, }, - &structs.Allocation{ + { Namespace: structs.DefaultNamespace, TaskGroup: tg1.Name, JobID: job.ID, diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go index 0b085ca1d..386c66d79 100644 --- a/scheduler/generic_sched_test.go +++ b/scheduler/generic_sched_test.go @@ -2821,7 +2821,7 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) { alloc.Name = "my-job.web[0]" alloc.ClientStatus = structs.AllocClientStatusComplete alloc.TaskStates = map[string]*structs.TaskState{ - "web": &structs.TaskState{ + "web": { State: structs.TaskStateDead, Events: []*structs.TaskEvent{ { @@ -2963,7 +2963,7 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) alloc.ClientStatus = structs.AllocClientStatusComplete alloc.TaskStates = map[string]*structs.TaskState{ - "web": &structs.TaskState{ + "web": { State: structs.TaskStateDead, Events: []*structs.TaskEvent{ { diff --git a/scheduler/rank_test.go b/scheduler/rank_test.go index 739e9f284..a4a3c3470 100644 --- a/scheduler/rank_test.go +++ b/scheduler/rank_test.go @@ -26,7 +26,7 @@ func TestFeasibleRankIterator(t *testing.T) { func TestBinPackIterator_NoExistingAlloc(t *testing.T) { _, ctx := testContext(t) nodes := []*RankedNode{ - &RankedNode{ + { Node: &structs.Node{ // Perfect fit Resources: &structs.Resources{ @@ -39,7 +39,7 @@ func TestBinPackIterator_NoExistingAlloc(t *testing.T) { }, }, }, - &RankedNode{ + { Node: &structs.Node{ // Overloaded Resources: &structs.Resources{ @@ -52,7 +52,7 @@ func TestBinPackIterator_NoExistingAlloc(t *testing.T) { }, }, }, - &RankedNode{ + { Node: &structs.Node{ // 50% fit Resources: &structs.Resources{ @@ -102,7 +102,7 @@ func TestBinPackIterator_NoExistingAlloc(t *testing.T) { func TestBinPackIterator_PlannedAlloc(t *testing.T) { _, ctx := testContext(t) nodes := []*RankedNode{ - &RankedNode{ + { Node: &structs.Node{ // Perfect fit ID: structs.GenerateUUID(), @@ -112,7 +112,7 @@ func TestBinPackIterator_PlannedAlloc(t *testing.T) { }, }, }, - &RankedNode{ + { Node: &structs.Node{ // Perfect fit ID: structs.GenerateUUID(), @@ -128,7 +128,7 @@ func TestBinPackIterator_PlannedAlloc(t *testing.T) { // Add a planned alloc to node1 that fills it plan := ctx.Plan() plan.NodeAllocation[nodes[0].Node.ID] = []*structs.Allocation{ - &structs.Allocation{ + { Resources: &structs.Resources{ CPU: 2048, MemoryMB: 2048, @@ -138,7 +138,7 @@ func TestBinPackIterator_PlannedAlloc(t *testing.T) { // Add a planned alloc to node2 that half fills it plan.NodeAllocation[nodes[1].Node.ID] = []*structs.Allocation{ - &structs.Allocation{ + { Resources: &structs.Resources{ CPU: 1024, MemoryMB: 1024, @@ -178,7 +178,7 @@ func TestBinPackIterator_PlannedAlloc(t *testing.T) { func TestBinPackIterator_ExistingAlloc(t *testing.T) { state, ctx := testContext(t) nodes := []*RankedNode{ - &RankedNode{ + { Node: &structs.Node{ // Perfect fit ID: structs.GenerateUUID(), @@ -188,7 +188,7 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) { }, }, }, - &RankedNode{ + { Node: &structs.Node{ // Perfect fit ID: structs.GenerateUUID(), @@ -267,7 +267,7 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) { func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) { state, ctx := testContext(t) nodes := []*RankedNode{ - 
&RankedNode{ + { Node: &structs.Node{ // Perfect fit ID: structs.GenerateUUID(), @@ -277,7 +277,7 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) { }, }, }, - &RankedNode{ + { Node: &structs.Node{ // Perfect fit ID: structs.GenerateUUID(), @@ -364,12 +364,12 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) { func TestJobAntiAffinity_PlannedAlloc(t *testing.T) { _, ctx := testContext(t) nodes := []*RankedNode{ - &RankedNode{ + { Node: &structs.Node{ ID: structs.GenerateUUID(), }, }, - &RankedNode{ + { Node: &structs.Node{ ID: structs.GenerateUUID(), }, @@ -380,11 +380,11 @@ func TestJobAntiAffinity_PlannedAlloc(t *testing.T) { // Add a planned alloc to node1 that fills it plan := ctx.Plan() plan.NodeAllocation[nodes[0].Node.ID] = []*structs.Allocation{ - &structs.Allocation{ + { ID: structs.GenerateUUID(), JobID: "foo", }, - &structs.Allocation{ + { ID: structs.GenerateUUID(), JobID: "foo", }, @@ -392,7 +392,7 @@ func TestJobAntiAffinity_PlannedAlloc(t *testing.T) { // Add a planned alloc to node2 that half fills it plan.NodeAllocation[nodes[1].Node.ID] = []*structs.Allocation{ - &structs.Allocation{ + { JobID: "bar", }, } diff --git a/scheduler/reconcile_util.go b/scheduler/reconcile_util.go index 3ca6150ba..f67198a3c 100644 --- a/scheduler/reconcile_util.go +++ b/scheduler/reconcile_util.go @@ -89,10 +89,8 @@ func newAllocMatrix(job *structs.Job, allocs []*structs.Allocation) allocMatrix if job != nil { for _, tg := range job.TaskGroups { - s, ok := m[tg.Name] - if !ok { - s = make(map[string]*structs.Allocation) - m[tg.Name] = s + if _, ok := m[tg.Name]; !ok { + m[tg.Name] = make(map[string]*structs.Allocation) } } } @@ -103,15 +101,6 @@ func newAllocMatrix(job *structs.Job, allocs []*structs.Allocation) allocMatrix // that help reconcile state. type allocSet map[string]*structs.Allocation -// newAllocSet creates an allocation set given a set of allocations -func newAllocSet(allocs []*structs.Allocation) allocSet { - s := make(map[string]*structs.Allocation, len(allocs)) - for _, a := range allocs { - s[a.ID] = a - } - return s -} - // GoString provides a human readable view of the set func (a allocSet) GoString() string { if len(a) == 0 { @@ -281,7 +270,7 @@ func bitmapFrom(input allocSet, minSize uint) structs.Bitmap { max = minSize } else if max%8 == 0 { // This may be possible if the job was scaled down. We want to make sure - // that the max index is not byte-alligned otherwise we will overflow + // that the max index is not byte-aligned otherwise we will overflow // the bitmap. max++ } diff --git a/scheduler/reconcile_util_test.go b/scheduler/reconcile_util_test.go index 2229f6098..57f3eb7b9 100644 --- a/scheduler/reconcile_util_test.go +++ b/scheduler/reconcile_util_test.go @@ -8,7 +8,7 @@ import ( // Test that we properly create the bitmap even when the alloc set includes an // allocation with a higher count than the current min count and it is byte -// alligned. +// aligned. 
+// aligned.
// Ensure no regerssion from: https://github.com/hashicorp/nomad/issues/3008 func TestBitmapFrom(t *testing.T) { input := map[string]*structs.Allocation{ diff --git a/scheduler/select_test.go b/scheduler/select_test.go index c89a2f9a3..1c85c8dcb 100644 --- a/scheduler/select_test.go +++ b/scheduler/select_test.go @@ -9,15 +9,15 @@ import ( func TestLimitIterator(t *testing.T) { _, ctx := testContext(t) nodes := []*RankedNode{ - &RankedNode{ + { Node: mock.Node(), Score: 1, }, - &RankedNode{ + { Node: mock.Node(), Score: 2, }, - &RankedNode{ + { Node: mock.Node(), Score: 3, }, @@ -53,15 +53,15 @@ func TestLimitIterator(t *testing.T) { func TestMaxScoreIterator(t *testing.T) { _, ctx := testContext(t) nodes := []*RankedNode{ - &RankedNode{ + { Node: mock.Node(), Score: 1, }, - &RankedNode{ + { Node: mock.Node(), Score: 2, }, - &RankedNode{ + { Node: mock.Node(), Score: 3, }, diff --git a/scheduler/util.go b/scheduler/util.go index 841ffcad2..ffd1366ee 100644 --- a/scheduler/util.go +++ b/scheduler/util.go @@ -253,7 +253,7 @@ func readyNodesInDCs(state State, dcs []string) ([]*structs.Node, map[string]int continue } out = append(out, node) - dcMap[node.Datacenter] += 1 + dcMap[node.Datacenter]++ } return out, dcMap, nil } @@ -277,7 +277,7 @@ func retryMax(max int, cb func() (bool, error), reset func() bool) error { if reset != nil && reset() { attempts = 0 } else { - attempts += 1 + attempts++ } } return &SetStatusError{ @@ -570,24 +570,6 @@ func evictAndPlace(ctx Context, diff *diffResult, allocs []allocTuple, desc stri return true } -// markLostAndPlace is used to mark allocations as lost and add them to the -// placement queue. evictAndPlace modifies both the diffResult and the -// limit. It returns true if the limit has been reached. -func markLostAndPlace(ctx Context, diff *diffResult, allocs []allocTuple, desc string, limit *int) bool { - n := len(allocs) - for i := 0; i < n && i < *limit; i++ { - a := allocs[i] - ctx.Plan().AppendUpdate(a.Alloc, structs.AllocDesiredStatusStop, desc, structs.AllocClientStatusLost) - diff.place = append(diff.place, a) - } - if n <= *limit { - *limit -= n - return false - } - *limit = 0 - return true -} - // tgConstrainTuple is used to store the total constraints of a task group. type tgConstrainTuple struct { // Holds the combined constraints of the task group and all it's sub-tasks. 
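The two pure deletions above, newAllocSet in reconcile_util.go and markLostAndPlace in util.go, are the pattern the newly enabled deadcode linter exists to catch: unexported helpers left behind by refactors, with no remaining callers. A minimal sketch of the situation such a report points at, using hypothetical names rather than anything from the Nomad tree:

package example

import "fmt"

// sumCounts is referenced from Report below, so linting leaves it alone.
func sumCounts(counts map[string]int) int {
	total := 0
	for _, c := range counts {
		total += c
	}
	return total
}

// staleHelper compiles fine but has no callers left after a refactor;
// a dead-code linter reports it so it can be deleted rather than bit-rot.
func staleHelper(counts map[string]int) bool {
	return len(counts) == 0
}

// Report is the only entry point; it never touches staleHelper.
func Report(counts map[string]int) {
	fmt.Printf("total=%d\n", sumCounts(counts))
}

Unlike the compiler, which only rejects unused imports and locals, these linters look across the whole package, which is how an entire helper like markLostAndPlace can linger unnoticed.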
@@ -716,7 +698,7 @@ func adjustQueuedAllocations(logger *log.Logger, result *structs.PlanResult, que } if _, ok := queuedAllocs[allocation.TaskGroup]; ok { - queuedAllocs[allocation.TaskGroup] -= 1 + queuedAllocs[allocation.TaskGroup]-- } else { logger.Printf("[ERR] sched: allocation %q placed but not in list of unplaced allocations", allocation.TaskGroup) } diff --git a/scheduler/util_test.go b/scheduler/util_test.go index 2917a94f6..ee7931c2d 100644 --- a/scheduler/util_test.go +++ b/scheduler/util_test.go @@ -60,7 +60,7 @@ func TestDiffAllocs(t *testing.T) { allocs := []*structs.Allocation{ // Update the 1st - &structs.Allocation{ + { ID: structs.GenerateUUID(), NodeID: "zip", Name: "my-job.web[0]", @@ -68,7 +68,7 @@ func TestDiffAllocs(t *testing.T) { }, // Ignore the 2rd - &structs.Allocation{ + { ID: structs.GenerateUUID(), NodeID: "zip", Name: "my-job.web[1]", @@ -76,7 +76,7 @@ func TestDiffAllocs(t *testing.T) { }, // Evict 11th - &structs.Allocation{ + { ID: structs.GenerateUUID(), NodeID: "zip", Name: "my-job.web[10]", @@ -84,14 +84,14 @@ func TestDiffAllocs(t *testing.T) { }, // Migrate the 3rd - &structs.Allocation{ + { ID: structs.GenerateUUID(), NodeID: "drainNode", Name: "my-job.web[2]", Job: oldJob, }, // Mark the 4th lost - &structs.Allocation{ + { ID: structs.GenerateUUID(), NodeID: "dead", Name: "my-job.web[3]", @@ -101,19 +101,19 @@ func TestDiffAllocs(t *testing.T) { // Have three terminal allocs terminalAllocs := map[string]*structs.Allocation{ - "my-job.web[4]": &structs.Allocation{ + "my-job.web[4]": { ID: structs.GenerateUUID(), NodeID: "zip", Name: "my-job.web[4]", Job: job, }, - "my-job.web[5]": &structs.Allocation{ + "my-job.web[5]": { ID: structs.GenerateUUID(), NodeID: "zip", Name: "my-job.web[5]", Job: job, }, - "my-job.web[6]": &structs.Allocation{ + "my-job.web[6]": { ID: structs.GenerateUUID(), NodeID: "zip", Name: "my-job.web[6]", @@ -197,7 +197,7 @@ func TestDiffSystemAllocs(t *testing.T) { allocs := []*structs.Allocation{ // Update allocation on baz - &structs.Allocation{ + { ID: structs.GenerateUUID(), NodeID: "baz", Name: "my-job.web[0]", @@ -205,7 +205,7 @@ func TestDiffSystemAllocs(t *testing.T) { }, // Ignore allocation on bar - &structs.Allocation{ + { ID: structs.GenerateUUID(), NodeID: "bar", Name: "my-job.web[0]", @@ -213,14 +213,14 @@ func TestDiffSystemAllocs(t *testing.T) { }, // Stop allocation on draining node. 
- &structs.Allocation{ + { ID: structs.GenerateUUID(), NodeID: drainNode.ID, Name: "my-job.web[0]", Job: oldJob, }, // Mark as lost on a dead node - &structs.Allocation{ + { ID: structs.GenerateUUID(), NodeID: deadNode.ID, Name: "my-job.web[0]", @@ -230,7 +230,7 @@ func TestDiffSystemAllocs(t *testing.T) { // Have three terminal allocs terminalAllocs := map[string]*structs.Allocation{ - "my-job.web[0]": &structs.Allocation{ + "my-job.web[0]": { ID: structs.GenerateUUID(), NodeID: "pipe", Name: "my-job.web[0]", @@ -393,11 +393,11 @@ func TestTaintedNodes(t *testing.T) { noErr(t, state.UpsertNode(1003, node4)) allocs := []*structs.Allocation{ - &structs.Allocation{NodeID: node1.ID}, - &structs.Allocation{NodeID: node2.ID}, - &structs.Allocation{NodeID: node3.ID}, - &structs.Allocation{NodeID: node4.ID}, - &structs.Allocation{NodeID: "12345678-abcd-efab-cdef-123456789abc"}, + {NodeID: node1.ID}, + {NodeID: node2.ID}, + {NodeID: node3.ID}, + {NodeID: node4.ID}, + {NodeID: "12345678-abcd-efab-cdef-123456789abc"}, } tainted, err := taintedNodes(state, allocs) if err != nil { @@ -576,10 +576,10 @@ func TestTasksUpdated(t *testing.T) { func TestEvictAndPlace_LimitLessThanAllocs(t *testing.T) { _, ctx := testContext(t) allocs := []allocTuple{ - allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, - allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, - allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, - allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, + {Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, + {Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, + {Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, + {Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, } diff := &diffResult{} @@ -600,10 +600,10 @@ func TestEvictAndPlace_LimitLessThanAllocs(t *testing.T) { func TestEvictAndPlace_LimitEqualToAllocs(t *testing.T) { _, ctx := testContext(t) allocs := []allocTuple{ - allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, - allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, - allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, - allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, + {Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, + {Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, + {Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, + {Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, } diff := &diffResult{} @@ -922,10 +922,10 @@ func TestInplaceUpdate_Success(t *testing.T) { func TestEvictAndPlace_LimitGreaterThanAllocs(t *testing.T) { _, ctx := testContext(t) allocs := []allocTuple{ - allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, - allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, - allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, - allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, + {Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, + {Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, + {Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, + {Alloc: &structs.Allocation{ID: structs.GenerateUUID()}}, } diff := &diffResult{} @@ -954,7 +954,7 @@ func TestTaskGroupConstraints(t *testing.T) { Constraints: []*structs.Constraint{constr}, EphemeralDisk: &structs.EphemeralDisk{}, Tasks: []*structs.Task{ - &structs.Task{ + { Driver: "exec", Resources: &structs.Resources{ CPU: 500, @@ -962,7 +962,7 @@ func 
TestTaskGroupConstraints(t *testing.T) { }, Constraints: []*structs.Constraint{constr2}, }, - &structs.Task{ + { Driver: "docker", Resources: &structs.Resources{ CPU: 500, @@ -975,7 +975,7 @@ func TestTaskGroupConstraints(t *testing.T) { // Build the expected values. expConstr := []*structs.Constraint{constr, constr2, constr3} - expDrivers := map[string]struct{}{"exec": struct{}{}, "docker": struct{}{}} + expDrivers := map[string]struct{}{"exec": {}, "docker": {}} expSize := &structs.Resources{ CPU: 1000, MemoryMB: 512, @@ -1001,7 +1001,7 @@ func TestProgressMade(t *testing.T) { } m := map[string][]*structs.Allocation{ - "foo": []*structs.Allocation{mock.Alloc()}, + "foo": {mock.Alloc()}, } both := &structs.PlanResult{ NodeAllocation: m, @@ -1027,29 +1027,29 @@ func TestDesiredUpdates(t *testing.T) { a2 := &structs.Allocation{TaskGroup: "bar"} place := []allocTuple{ - allocTuple{TaskGroup: tg1}, - allocTuple{TaskGroup: tg1}, - allocTuple{TaskGroup: tg1}, - allocTuple{TaskGroup: tg2}, + {TaskGroup: tg1}, + {TaskGroup: tg1}, + {TaskGroup: tg1}, + {TaskGroup: tg2}, } stop := []allocTuple{ - allocTuple{TaskGroup: tg2, Alloc: a2}, - allocTuple{TaskGroup: tg2, Alloc: a2}, + {TaskGroup: tg2, Alloc: a2}, + {TaskGroup: tg2, Alloc: a2}, } ignore := []allocTuple{ - allocTuple{TaskGroup: tg1}, + {TaskGroup: tg1}, } migrate := []allocTuple{ - allocTuple{TaskGroup: tg2}, + {TaskGroup: tg2}, } inplace := []allocTuple{ - allocTuple{TaskGroup: tg1}, - allocTuple{TaskGroup: tg1}, + {TaskGroup: tg1}, + {TaskGroup: tg1}, } destructive := []allocTuple{ - allocTuple{TaskGroup: tg1}, - allocTuple{TaskGroup: tg2}, - allocTuple{TaskGroup: tg2}, + {TaskGroup: tg1}, + {TaskGroup: tg2}, + {TaskGroup: tg2}, } diff := &diffResult{ place: place, @@ -1094,13 +1094,13 @@ func TestUtil_AdjustQueuedAllocations(t *testing.T) { planResult := structs.PlanResult{ NodeUpdate: map[string][]*structs.Allocation{ - "node-1": []*structs.Allocation{alloc1}, + "node-1": {alloc1}, }, NodeAllocation: map[string][]*structs.Allocation{ - "node-1": []*structs.Allocation{ + "node-1": { alloc2, }, - "node-2": []*structs.Allocation{ + "node-2": { alloc3, alloc4, }, },
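Taken together, the hunks in this patch reduce to a few linter-driven patterns, so it is worth spelling each one out once. By far the most common is the composite-literal simplification reported by gofmt's -s mode: inside a slice or map literal, the element type can be elided because it is implied by the container's type. A small self-contained illustration of the before and after forms; the Port type here is a stand-in, not Nomad's:

package main

import "fmt"

// Port stands in for the many structs simplified throughout this patch.
type Port struct {
	Label string
	Value int
}

func main() {
	// Verbose form: the element type is repeated on every entry.
	// gofmt -s flags these and rewrites them in place.
	before := []*Port{
		&Port{Label: "http", Value: 80},
		&Port{Label: "ssh", Value: 22},
	}

	// Simplified form: &Port is implied by []*Port, which is exactly
	// the rewrite applied across the test files above.
	after := []*Port{
		{Label: "http", Value: 80},
		{Label: "ssh", Value: 22},
	}

	fmt.Println(*before[0], *after[1])
}

The same elision applies to map values (map[string]*Port{"web": {Value: 80}}) and to nested literals, which is why the deeply indented test fixtures shrink the most.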
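The worker_test.go changes are different in kind: there go vet caught a genuine bug, a Fatalf format string with two %d verbs but only one argument, so the failure message would have printed a %!d(MISSING) placeholder instead of the actual wait index. A hedged sketch of the before and after; the test and values are invented:

package example

import "testing"

func TestWaitIndex(t *testing.T) {
	var waitIndex, want uint64 = 7, 7
	if waitIndex != want {
		// Before: t.Fatalf("bad wait index; got %d; want %d", want)
		// vet: Fatalf format %d reads arg 2, but call has only 1 arg.
		t.Fatalf("bad wait index; got %d; want %d", waitIndex, want)
	}
}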
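Finally, the error check added to TestStateStore_AllocsForRegisteredJob is the sort of gap that ineffassign and plain review hygiene are meant to close: an err that is assigned and then never consulted, silently discarding a failure. Roughly, under a hypothetical lookup API:

package example

import (
	"errors"
	"fmt"
)

// lookup is a stand-in for a state-store query that can fail.
func lookup(ok bool) ([]string, error) {
	if !ok {
		return nil, errors.New("lookup failed")
	}
	return []string{"alloc-1"}, nil
}

func Demo() {
	out, err := lookup(true)
	if err != nil { // without this check, the assignment to err is dead
		fmt.Println("bad:", err)
		return
	}
	fmt.Println(len(out))
}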