diff --git a/.gitignore b/.gitignore index e407015f6..39dc26c6f 100644 --- a/.gitignore +++ b/.gitignore @@ -52,7 +52,6 @@ nomad_linux_amd64 nomad_darwin_amd64 TODO.md codecgen-*.generated.go -*.generated.go .terraform *.tfstate* diff --git a/.travis.yml b/.travis.yml index da9da6a13..022c49a28 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,9 +13,6 @@ branches: only: - master -matrix: - include: - matrix: include: - os: linux @@ -31,6 +28,9 @@ matrix: env: RUN_STATIC_CHECKS=1 SKIP_NOMAD_TESTS=1 - os: osx osx_image: xcode9.1 + allow_failures: + - os: osx + fast_finish: true cache: directories: @@ -43,6 +43,7 @@ before_install: install: - if [[ -z "$SKIP_NOMAD_TESTS" ]]; then make deps ; fi - if [[ "$RUN_STATIC_CHECKS" ]]; then make lint-deps ; fi + - if [[ "$RUN_UI_TESTS" ]]; then . $HOME/.nvm/nvm.sh && cd ui && nvm use && cd .. ; fi script: - sudo -E "PATH=$PATH" make travis diff --git a/CHANGELOG.md b/CHANGELOG.md index f746f5cc5..ba1fb8449 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,10 @@ -## 0.7.1 (Unreleased) +## 0.8 (Unreleased) + +BUG FIXES: + * core: Fix search endpoint forwarding for multi-region clusters [GH-3680] + * config: Revert minimum CPU limit back to 20 from 100. + +## 0.7.1 (December 19, 2017) __BACKWARDS INCOMPATIBILITIES:__ * client: The format of service IDs in Consul has changed. If you rely upon @@ -7,6 +13,8 @@ __BACKWARDS INCOMPATIBILITIES:__ * config: Nomad no longer parses Atlas configuration stanzas. Atlas has been deprecated since earlier this year. If you have an Atlas stanza in your config file it will have to be removed. + * config: Default minimum CPU configuration has been changed to 100 from 20. Jobs + using the old minimum value of 20 will have to be updated. * telemetry: Hostname is now emitted via a tag rather than within the key name. To maintain old behavior during an upgrade path specify `backwards_compatible_metrics` in the telemetry configuration. @@ -52,6 +60,9 @@ BUG FIXES: * core: Fix issue in which restoring periodic jobs could fail when a leader election occurs [[GH-3646](https://github.com/hashicorp/nomad/issues/3646)] + * core: Fix race condition in which rapid reprocessing of a blocked evaluation + may lead to the scheduler not seeing the results of the previous scheduling + event [[GH-3669](https://github.com/hashicorp/nomad/issues/3669)] * core: Fixed an issue where the leader server could get into a state where it was no longer performing the periodic leader loop duties after a barrier timeout error [[GH-3402](https://github.com/hashicorp/nomad/issues/3402)] diff --git a/api/resources.go b/api/resources.go index 8c547ee7e..1abcf209d 100644 --- a/api/resources.go +++ b/api/resources.go @@ -49,7 +49,7 @@ func DefaultResources() *Resources { // IN nomad/structs/structs.go and should be kept in sync. 
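Context for the next hunk: Nomad 0.7.1 raised the minimum CPU a task may request from 20 MHz to 100 MHz, and the CHANGELOG entry above records the revert back to 20 for 0.8 so that existing jobs keep validating. As a rough, hypothetical illustration (not part of this diff) of the smallest resource block the api package accepts again after the revert:

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper"
)

func main() {
	// The restored floor: 20 MHz of CPU and 10 MB of memory.
	res := &api.Resources{
		CPU:      helper.IntToPtr(20),
		MemoryMB: helper.IntToPtr(10),
	}
	fmt.Println(*res.CPU, *res.MemoryMB)
}

The API-side floor itself, which mirrors nomad/structs/structs.go, follows.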
func MinResources() *Resources { return &Resources{ - CPU: helper.IntToPtr(100), + CPU: helper.IntToPtr(20), MemoryMB: helper.IntToPtr(10), IOPS: helper.IntToPtr(0), } diff --git a/client/alloc_watcher.go b/client/alloc_watcher.go index 03aa948f2..e54e7184d 100644 --- a/client/alloc_watcher.go +++ b/client/alloc_watcher.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "sync" + "syscall" "time" "github.com/hashicorp/consul/lib" @@ -452,6 +453,9 @@ func (p *remotePrevAlloc) streamAllocDir(ctx context.Context, resp io.ReadCloser tr := tar.NewReader(resp) defer resp.Close() + // Cache effective uid as we only run Chown if we're root + euid := syscall.Geteuid() + canceled := func() bool { select { case <-ctx.Done(): @@ -517,9 +521,14 @@ func (p *remotePrevAlloc) streamAllocDir(ctx context.Context, resp io.ReadCloser f.Close() return fmt.Errorf("error chmoding file %v", err) } - if err := f.Chown(hdr.Uid, hdr.Gid); err != nil { - f.Close() - return fmt.Errorf("error chowning file %v", err) + + // Can't change owner if not root. Returns false on + // Windows as Chown always errors there. + if euid == 0 { + if err := f.Chown(hdr.Uid, hdr.Gid); err != nil { + f.Close() + return fmt.Errorf("error chowning file %v", err) + } } // We write in chunks so that we can test if the client diff --git a/client/driver/lxc_test.go b/client/driver/lxc_test.go index ddf8260fd..e9de2dab7 100644 --- a/client/driver/lxc_test.go +++ b/client/driver/lxc_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/hashicorp/nomad/client/config" + ctestutil "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" lxc "gopkg.in/lxc/go-lxc.v2" @@ -61,6 +62,7 @@ func TestLxcDriver_Start_Wait(t *testing.T) { if !lxcPresent(t) { t.Skip("lxc not present") } + ctestutil.RequireRoot(t) task := &structs.Task{ Name: "foo", @@ -137,6 +139,7 @@ func TestLxcDriver_Open_Wait(t *testing.T) { if !lxcPresent(t) { t.Skip("lxc not present") } + ctestutil.RequireRoot(t) task := &structs.Task{ Name: "foo", diff --git a/client/driver/qemu_test.go b/client/driver/qemu_test.go index b0978ec9c..ebab5dae6 100644 --- a/client/driver/qemu_test.go +++ b/client/driver/qemu_test.go @@ -127,6 +127,7 @@ func TestQemuDriver_GracefulShutdown(t *testing.T) { t.Parallel() } ctestutils.QemuCompatible(t) + ctestutils.RequireRoot(t) task := &structs.Task{ Name: "linux", Driver: "qemu", diff --git a/client/testutil/driver_compatible.go b/client/testutil/driver_compatible.go index 996fca131..97f994c44 100644 --- a/client/testutil/driver_compatible.go +++ b/client/testutil/driver_compatible.go @@ -7,6 +7,13 @@ import ( "testing" ) +// RequireRoot skips tests unless running on a Unix as root. 
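Both the alloc-dir streaming change above and the new test helper hinge on the same effective-uid check: only root may change file ownership, syscall.Geteuid returns -1 on Windows, and f.Chown always errors there, so the Chown call is simply skipped when the process is not root. A minimal standalone sketch of that guard, with a hypothetical chownIfRoot helper name:

package fileutil

import (
	"os"
	"syscall"
)

// chownIfRoot changes ownership only when running as root. For ordinary
// users (and on Windows, where Geteuid returns -1) it is a no-op, which
// mirrors the guard added to streamAllocDir above.
func chownIfRoot(f *os.File, uid, gid int) error {
	if syscall.Geteuid() != 0 {
		return nil
	}
	return f.Chown(uid, gid)
}

The RequireRoot helper whose doc comment appears just above applies the same check to skip privileged driver tests instead of letting them fail; its body follows.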
+func RequireRoot(t *testing.T) { + if syscall.Geteuid() != 0 { + t.Skip("Must run as root on Unix") + } +} + func ExecCompatible(t *testing.T) { if runtime.GOOS != "linux" || syscall.Geteuid() != 0 { t.Skip("Test only available running as root on linux") diff --git a/command/agent/consul/client.go b/command/agent/consul/client.go index c22cb8d74..b8db963ae 100644 --- a/command/agent/consul/client.go +++ b/command/agent/consul/client.go @@ -663,7 +663,7 @@ func (c *ServiceClient) checkRegs(ops *operations, allocID, serviceID string, se ip, port, err := getAddress(addrMode, portLabel, task.Resources.Networks, net) if err != nil { - return nil, fmt.Errorf("unable to get address for check %q: %v", check.Name, err) + return nil, fmt.Errorf("error getting address for check %q: %v", check.Name, err) } checkReg, err := createCheckReg(serviceID, checkID, check, ip, port) @@ -1036,6 +1036,11 @@ func createCheckReg(serviceID, checkID string, check *structs.ServiceCheck, host chkReg.Timeout = check.Timeout.String() chkReg.Interval = check.Interval.String() + // Require an address for http or tcp checks + if port == 0 && check.RequiresPort() { + return nil, fmt.Errorf("%s checks require an address", check.Type) + } + switch check.Type { case structs.ServiceCheckHTTP: proto := check.Protocol @@ -1089,9 +1094,15 @@ func isOldNomadService(id string) bool { return strings.HasPrefix(id, prefix) } -// getAddress returns the ip and port to use for a service or check. An error -// is returned if an ip and port cannot be determined. +// getAddress returns the IP and port to use for a service or check. If no port +// label is specified (an empty value), zero values are returned because no +// address could be resolved. func getAddress(addrMode, portLabel string, networks structs.Networks, driverNet *cstructs.DriverNetwork) (string, int, error) { + // No port label specified, no address can be assembled + if portLabel == "" { + return "", 0, nil + } + switch addrMode { case structs.AddressModeAuto: if driverNet.Advertise() { diff --git a/command/agent/consul/int_test.go b/command/agent/consul/int_test.go index ba9975532..ee0fff748 100644 --- a/command/agent/consul/int_test.go +++ b/command/agent/consul/int_test.go @@ -87,8 +87,17 @@ func TestConsul_Integration(t *testing.T) { task.Config = map[string]interface{}{ "run_for": "1h", } + // Choose a port that shouldn't be in use - task.Resources.Networks[0].ReservedPorts = []structs.Port{{Label: "http", Value: 3}} + netResource := &structs.NetworkResource{ + Device: "eth0", + IP: "127.0.0.1", + MBits: 50, + ReservedPorts: []structs.Port{{Label: "http", Value: 3}}, + } + alloc.Resources.Networks[0] = netResource + alloc.TaskResources["web"].Networks[0] = netResource + task.Resources.Networks[0] = netResource task.Services = []*structs.Service{ { Name: "httpd", @@ -96,13 +105,12 @@ func TestConsul_Integration(t *testing.T) { Tags: []string{"nomad", "test", "http"}, Checks: []*structs.ServiceCheck{ { - Name: "httpd-http-check", - Type: "http", - Path: "/", - Protocol: "http", - PortLabel: "http", - Interval: 9000 * time.Hour, - Timeout: 1, // fail as fast as possible + Name: "httpd-http-check", + Type: "http", + Path: "/", + Protocol: "http", + Interval: 9000 * time.Hour, + Timeout: 1, // fail as fast as possible }, { Name: "httpd-script-check", diff --git a/command/agent/consul/unit_test.go b/command/agent/consul/unit_test.go index 88acc4ee1..4d8123b88 100644 --- a/command/agent/consul/unit_test.go +++ b/command/agent/consul/unit_test.go @@ -1566,8 +1566,13 @@ func 
TestGetAddress(t *testing.T) { { Name: "InvalidMode", Mode: "invalid-mode", + PortLabel: "80", ErrContains: "invalid address mode", }, + { + Name: "EmptyIsOk", + Mode: structs.AddressModeHost, + }, } for _, tc := range cases { diff --git a/nomad/eval_endpoint_test.go b/nomad/eval_endpoint_test.go index 6955182c5..ea0c42a01 100644 --- a/nomad/eval_endpoint_test.go +++ b/nomad/eval_endpoint_test.go @@ -286,6 +286,73 @@ func TestEvalEndpoint_Dequeue_WaitIndex(t *testing.T) { } } +func TestEvalEndpoint_Dequeue_UpdateWaitIndex(t *testing.T) { + // test enqueueing an eval, updating a plan result for the same eval and de-queueing the eval + t.Parallel() + s1 := testServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer s1.Shutdown() + codec := rpcClient(t, s1) + testutil.WaitForLeader(t, s1.RPC) + + alloc := mock.Alloc() + job := alloc.Job + alloc.Job = nil + + state := s1.fsm.State() + + if err := state.UpsertJob(999, job); err != nil { + t.Fatalf("err: %v", err) + } + + eval := mock.Eval() + eval.JobID = job.ID + + // Create an eval + if err := state.UpsertEvals(1, []*structs.Evaluation{eval}); err != nil { + t.Fatalf("err: %v", err) + } + + s1.evalBroker.Enqueue(eval) + + // Create a plan result and apply it with a later index + res := structs.ApplyPlanResultsRequest{ + AllocUpdateRequest: structs.AllocUpdateRequest{ + Alloc: []*structs.Allocation{alloc}, + Job: job, + }, + EvalID: eval.ID, + } + assert := assert.New(t) + err := state.UpsertPlanResults(1000, &res) + assert.Nil(err) + + // Dequeue the eval + get := &structs.EvalDequeueRequest{ + Schedulers: defaultSched, + SchedulerVersion: scheduler.SchedulerVersion, + WriteRequest: structs.WriteRequest{Region: "global"}, + } + var resp structs.EvalDequeueResponse + if err := msgpackrpc.CallWithCodec(codec, "Eval.Dequeue", get, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + // Ensure outstanding + token, ok := s1.evalBroker.Outstanding(eval.ID) + if !ok { + t.Fatalf("should be outstanding") + } + if token != resp.Token { + t.Fatalf("bad token: %#v %#v", token, resp.Token) + } + + if resp.WaitIndex != 1000 { + t.Fatalf("bad wait index; got %d; want %d", resp.WaitIndex, 1000) + } +} + func TestEvalEndpoint_Dequeue_Version_Mismatch(t *testing.T) { t.Parallel() s1 := testServer(t, func(c *Config) { diff --git a/nomad/fsm.go b/nomad/fsm.go index 7d0ef4fc4..0a004c836 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -1104,7 +1104,7 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error { return nil } -// reconcileSummaries re-calculates the queued allocations for every job that we +// reconcileQueuedAllocations re-calculates the queued allocations for every job that we // created a Job Summary during the snap shot restore func (n *nomadFSM) reconcileQueuedAllocations(index uint64) error { // Get all the jobs @@ -1142,7 +1142,7 @@ func (n *nomadFSM) reconcileQueuedAllocations(index uint64) error { Status: structs.EvalStatusPending, AnnotatePlan: true, } - + snap.UpsertEvals(100, []*structs.Evaluation{eval}) // Create the scheduler and run it sched, err := scheduler.NewScheduler(eval.Type, n.logger, snap, planner) if err != nil { diff --git a/nomad/fsm_test.go b/nomad/fsm_test.go index 4f9051add..fdaf681b7 100644 --- a/nomad/fsm_test.go +++ b/nomad/fsm_test.go @@ -944,9 +944,14 @@ func TestFSM_UpsertAllocs_StrippedResources(t *testing.T) { fsm := testFSM(t) alloc := mock.Alloc() + + // Need to remove mock dynamic port from alloc as it won't be computed + // in this test + 
alloc.TaskResources["web"].Networks[0].DynamicPorts[0].Value = 0 + fsm.State().UpsertJobSummary(1, mock.JobSummary(alloc.JobID)) job := alloc.Job - resources := alloc.Resources + origResources := alloc.Resources alloc.Resources = nil req := structs.AllocUpdateRequest{ Job: job, @@ -973,10 +978,10 @@ func TestFSM_UpsertAllocs_StrippedResources(t *testing.T) { alloc.AllocModifyIndex = out.AllocModifyIndex // Resources should be recomputed - resources.DiskMB = alloc.Job.TaskGroups[0].EphemeralDisk.SizeMB - alloc.Resources = resources + origResources.DiskMB = alloc.Job.TaskGroups[0].EphemeralDisk.SizeMB + alloc.Resources = origResources if !reflect.DeepEqual(alloc, out) { - t.Fatalf("bad: %#v %#v", alloc, out) + t.Fatalf("not equal: % #v", pretty.Diff(alloc, out)) } } @@ -1213,6 +1218,10 @@ func TestFSM_ApplyPlanResults(t *testing.T) { alloc.DeploymentID = d.ID + eval := mock.Eval() + eval.JobID = job.ID + fsm.State().UpsertEvals(1, []*structs.Evaluation{eval}) + fsm.State().UpsertJobSummary(1, mock.JobSummary(alloc.JobID)) req := structs.ApplyPlanResultsRequest{ AllocUpdateRequest: structs.AllocUpdateRequest{ @@ -1220,6 +1229,7 @@ func TestFSM_ApplyPlanResults(t *testing.T) { Alloc: []*structs.Allocation{alloc}, }, Deployment: d, + EvalID: eval.ID, } buf, err := structs.Encode(structs.ApplyPlanResultsRequestType, req) if err != nil { @@ -1233,32 +1243,32 @@ func TestFSM_ApplyPlanResults(t *testing.T) { // Verify the allocation is registered ws := memdb.NewWatchSet() + assert := assert.New(t) out, err := fsm.State().AllocByID(ws, alloc.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(err) alloc.CreateIndex = out.CreateIndex alloc.ModifyIndex = out.ModifyIndex alloc.AllocModifyIndex = out.AllocModifyIndex // Job should be re-attached alloc.Job = job - if !reflect.DeepEqual(alloc, out) { - t.Fatalf("bad: %#v %#v", alloc, out) - } + assert.Equal(alloc, out) dout, err := fsm.State().DeploymentByID(ws, d.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if tg, ok := dout.TaskGroups[alloc.TaskGroup]; !ok || tg.PlacedAllocs != 1 { - t.Fatalf("err: %v %v", tg, err) - } + assert.Nil(err) + tg, ok := dout.TaskGroups[alloc.TaskGroup] + assert.True(ok) + assert.NotNil(tg) + assert.Equal(1, tg.PlacedAllocs) // Ensure that the original job is used evictAlloc := alloc.Copy() job = mock.Job() job.Priority = 123 + eval = mock.Eval() + eval.JobID = job.ID + + fsm.State().UpsertEvals(2, []*structs.Evaluation{eval}) evictAlloc.Job = nil evictAlloc.DesiredStatus = structs.AllocDesiredStatusEvict @@ -1267,28 +1277,28 @@ func TestFSM_ApplyPlanResults(t *testing.T) { Job: job, Alloc: []*structs.Allocation{evictAlloc}, }, + EvalID: eval.ID, } buf, err = structs.Encode(structs.ApplyPlanResultsRequestType, req2) - if err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(err) - resp = fsm.Apply(makeLog(buf)) - if resp != nil { - t.Fatalf("resp: %v", resp) - } + log := makeLog(buf) + //set the index to something other than 1 + log.Index = 25 + resp = fsm.Apply(log) + assert.Nil(resp) // Verify we are evicted out, err = fsm.State().AllocByID(ws, alloc.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if out.DesiredStatus != structs.AllocDesiredStatusEvict { - t.Fatalf("alloc found!") - } - if out.Job == nil || out.Job.Priority == 123 { - t.Fatalf("bad job") - } + assert.Nil(err) + assert.Equal(structs.AllocDesiredStatusEvict, out.DesiredStatus) + assert.NotNil(out.Job) + assert.NotEqual(123, out.Job.Priority) + + evalOut, err := fsm.State().EvalByID(ws, eval.ID) + assert.Nil(err) + 
assert.Equal(log.Index, evalOut.ModifyIndex) + } func TestFSM_DeploymentStatusUpdate(t *testing.T) { diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go index a47588ad8..41857f92f 100644 --- a/nomad/job_endpoint.go +++ b/nomad/job_endpoint.go @@ -1088,6 +1088,8 @@ func (j *Job) Plan(args *structs.JobPlanRequest, reply *structs.JobPlanResponse) AnnotatePlan: true, } + snap.UpsertEvals(100, []*structs.Evaluation{eval}) + // Create an in-memory Planner that returns no errors and stores the // submitted plan and created evals. planner := &scheduler.Harness{ diff --git a/nomad/mock/mock.go b/nomad/mock/mock.go index d0c889b09..c4921a644 100644 --- a/nomad/mock/mock.go +++ b/nomad/mock/mock.go @@ -292,7 +292,7 @@ func Alloc() *structs.Allocation { IP: "192.168.0.100", ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}}, MBits: 50, - DynamicPorts: []structs.Port{{Label: "http"}}, + DynamicPorts: []structs.Port{{Label: "http", Value: 9876}}, }, }, }, diff --git a/nomad/plan_apply.go b/nomad/plan_apply.go index 5d2c29bcf..44f78e2c8 100644 --- a/nomad/plan_apply.go +++ b/nomad/plan_apply.go @@ -135,6 +135,7 @@ func (s *Server) applyPlan(plan *structs.Plan, result *structs.PlanResult, snap }, Deployment: result.Deployment, DeploymentUpdates: result.DeploymentUpdates, + EvalID: plan.EvalID, } for _, updateList := range result.NodeUpdate { req.Alloc = append(req.Alloc, updateList...) diff --git a/nomad/plan_apply_test.go b/nomad/plan_apply_test.go index 7d3195d92..93e44e617 100644 --- a/nomad/plan_apply_test.go +++ b/nomad/plan_apply_test.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" "github.com/hashicorp/raft" + "github.com/stretchr/testify/assert" ) const ( @@ -65,7 +66,7 @@ func TestPlanApply_applyPlan(t *testing.T) { defer s1.Shutdown() testutil.WaitForLeader(t, s1.RPC) - // Register ndoe + // Register node node := mock.Node() testRegisterNode(t, s1, node) @@ -91,6 +92,13 @@ func TestPlanApply_applyPlan(t *testing.T) { // Register alloc, deployment and deployment update alloc := mock.Alloc() s1.State().UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)) + // Create an eval + eval := mock.Eval() + eval.JobID = alloc.JobID + if err := s1.State().UpsertEvals(1, []*structs.Evaluation{eval}); err != nil { + t.Fatalf("err: %v", err) + } + planRes := &structs.PlanResult{ NodeAllocation: map[string][]*structs.Allocation{ node.ID: {alloc}, @@ -110,73 +118,55 @@ func TestPlanApply_applyPlan(t *testing.T) { Job: alloc.Job, Deployment: dnew, DeploymentUpdates: updates, + EvalID: eval.ID, } // Apply the plan future, err := s1.applyPlan(plan, planRes, snap) - if err != nil { - t.Fatalf("err: %v", err) - } + assert := assert.New(t) + assert.Nil(err) // Verify our optimistic snapshot is updated ws := memdb.NewWatchSet() - if out, err := snap.AllocByID(ws, alloc.ID); err != nil || out == nil { - t.Fatalf("bad: %v %v", out, err) - } + allocOut, err := snap.AllocByID(ws, alloc.ID) + assert.Nil(err) + assert.NotNil(allocOut) - if out, err := snap.DeploymentByID(ws, plan.Deployment.ID); err != nil || out == nil { - t.Fatalf("bad: %v %v", out, err) - } + deploymentOut, err := snap.DeploymentByID(ws, plan.Deployment.ID) + assert.Nil(err) + assert.NotNil(deploymentOut) // Check plan does apply cleanly index, err := planWaitFuture(future) - if err != nil { - t.Fatalf("err: %v", err) - } - if index == 0 { - t.Fatalf("bad: %d", index) - } + assert.Nil(err) + assert.NotEqual(0, index) // Lookup the allocation fsmState := s1.fsm.State() - out, 
err := fsmState.AllocByID(ws, alloc.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if out == nil { - t.Fatalf("missing alloc") - } - - if out.CreateTime <= 0 { - t.Fatalf("invalid create time %v", out.CreateTime) - } - if out.ModifyTime <= 0 { - t.Fatalf("invalid modify time %v", out.CreateTime) - } - if out.CreateTime != out.ModifyTime { - t.Fatalf("create time %v modify time %v must be equal", out.CreateTime, out.ModifyTime) - } + allocOut, err = fsmState.AllocByID(ws, alloc.ID) + assert.Nil(err) + assert.NotNil(allocOut) + assert.True(allocOut.CreateTime > 0) + assert.True(allocOut.ModifyTime > 0) + assert.Equal(allocOut.CreateTime, allocOut.ModifyTime) // Lookup the new deployment dout, err := fsmState.DeploymentByID(ws, plan.Deployment.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if dout == nil { - t.Fatalf("missing deployment") - } + assert.Nil(err) + assert.NotNil(dout) // Lookup the updated deployment dout2, err := fsmState.DeploymentByID(ws, oldDeployment.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if dout2 == nil { - t.Fatalf("missing deployment") - } - if dout2.Status != desiredStatus || dout2.StatusDescription != desiredStatusDescription { - t.Fatalf("bad status: %#v", dout2) - } + assert.Nil(err) + assert.NotNil(dout2) + assert.Equal(desiredStatus, dout2.Status) + assert.Equal(desiredStatusDescription, dout2.StatusDescription) + + // Lookup updated eval + evalOut, err := fsmState.EvalByID(ws, eval.ID) + assert.Nil(err) + assert.NotNil(evalOut) + assert.Equal(index, evalOut.ModifyIndex) // Evict alloc, Register alloc2 allocEvict := new(structs.Allocation) @@ -197,60 +187,43 @@ func TestPlanApply_applyPlan(t *testing.T) { // Snapshot the state snap, err = s1.State().Snapshot() - if err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(err) // Apply the plan plan = &structs.Plan{ - Job: job, + Job: job, + EvalID: eval.ID, } future, err = s1.applyPlan(plan, planRes, snap) - if err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(err) // Check that our optimistic view is updated - if out, _ := snap.AllocByID(ws, allocEvict.ID); out.DesiredStatus != structs.AllocDesiredStatusEvict { - t.Fatalf("bad: %#v", out) - } + out, _ := snap.AllocByID(ws, allocEvict.ID) + assert.Equal(structs.AllocDesiredStatusEvict, out.DesiredStatus) // Verify plan applies cleanly index, err = planWaitFuture(future) - if err != nil { - t.Fatalf("err: %v", err) - } - if index == 0 { - t.Fatalf("bad: %d", index) - } + assert.Nil(err) + assert.NotEqual(0, index) // Lookup the allocation - out, err = s1.fsm.State().AllocByID(ws, alloc.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if out.DesiredStatus != structs.AllocDesiredStatusEvict { - t.Fatalf("should be evicted alloc: %#v", out) - } - if out.Job == nil { - t.Fatalf("missing job") - } - - if out.ModifyTime <= 0 { - t.Fatalf("must have valid modify time but was %v", out.ModifyTime) - } + allocOut, err = s1.fsm.State().AllocByID(ws, alloc.ID) + assert.Nil(err) + assert.Equal(structs.AllocDesiredStatusEvict, allocOut.DesiredStatus) + assert.NotNil(allocOut.Job) + assert.True(allocOut.ModifyTime > 0) // Lookup the allocation - out, err = s1.fsm.State().AllocByID(ws, alloc2.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if out == nil { - t.Fatalf("missing alloc") - } - if out.Job == nil { - t.Fatalf("missing job") - } + allocOut, err = s1.fsm.State().AllocByID(ws, alloc2.ID) + assert.Nil(err) + assert.NotNil(allocOut) + assert.NotNil(allocOut.Job) + + // Lookup updated eval + evalOut, err = 
fsmState.EvalByID(ws, eval.ID) + assert.Nil(err) + assert.NotNil(evalOut) + assert.Equal(index, evalOut.ModifyIndex) } func TestPlanApply_EvalPlan_Simple(t *testing.T) { diff --git a/nomad/search_endpoint.go b/nomad/search_endpoint.go index 61931a5e8..9eb892dd3 100644 --- a/nomad/search_endpoint.go +++ b/nomad/search_endpoint.go @@ -2,7 +2,9 @@ package nomad import ( "strings" + "time" + metrics "github.com/armon/go-metrics" memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/nomad/state" @@ -114,6 +116,11 @@ func roundUUIDDownIfOdd(prefix string, context structs.Context) string { // PrefixSearch is used to list matches for a given prefix, and returns // matching jobs, evaluations, allocations, and/or nodes. func (s *Search) PrefixSearch(args *structs.SearchRequest, reply *structs.SearchResponse) error { + if done, err := s.srv.forward("Search.PrefixSearch", args, args, reply); done { + return err + } + defer metrics.MeasureSince([]string{"nomad", "search", "prefix_search"}, time.Now()) + aclObj, err := s.srv.ResolveToken(args.AuthToken) if err != nil { return err diff --git a/nomad/search_endpoint_test.go b/nomad/search_endpoint_test.go index 7cd6fe6b2..2631b6958 100644 --- a/nomad/search_endpoint_test.go +++ b/nomad/search_endpoint_test.go @@ -712,3 +712,47 @@ func TestSearch_PrefixSearch_RoundDownToEven(t *testing.T) { assert.Equal(1, len(resp.Matches[structs.Jobs])) assert.Equal(job.ID, resp.Matches[structs.Jobs][0]) } + +func TestSearch_PrefixSearch_MultiRegion(t *testing.T) { + assert := assert.New(t) + + jobName := "exampleexample" + + t.Parallel() + s1 := testServer(t, func(c *Config) { + c.NumSchedulers = 0 + c.Region = "foo" + }) + defer s1.Shutdown() + + s2 := testServer(t, func(c *Config) { + c.NumSchedulers = 0 + c.Region = "bar" + }) + defer s2.Shutdown() + + testJoin(t, s1, s2) + testutil.WaitForLeader(t, s1.RPC) + + job := registerAndVerifyJob(s1, t, jobName, 0) + + req := &structs.SearchRequest{ + Prefix: "", + Context: structs.Jobs, + QueryOptions: structs.QueryOptions{ + Region: "foo", + Namespace: job.Namespace, + }, + } + + codec := rpcClient(t, s2) + + var resp structs.SearchResponse + if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + assert.Equal(1, len(resp.Matches[structs.Jobs])) + assert.Equal(job.ID, resp.Matches[structs.Jobs][0]) + assert.Equal(uint64(jobIndex), resp.Index) +} diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 0091055b3..f837a6106 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -196,6 +196,16 @@ func (s *StateStore) UpsertPlanResults(index uint64, results *structs.ApplyPlanR return err } + // COMPAT: Nomad versions before 0.7.1 did not include the eval ID when + // applying the plan. Thus while we are upgrading, we ignore updating the + // modify index of evaluations from older plans. + if results.EvalID != "" { + // Update the modify index of the eval id + if err := s.updateEvalModifyIndex(txn, index, results.EvalID); err != nil { + return err + } + } + txn.Commit() return nil } @@ -1486,6 +1496,34 @@ func (s *StateStore) nestedUpsertEval(txn *memdb.Txn, index uint64, eval *struct return nil } +// updateEvalModifyIndex is used to update the modify index of an evaluation that has been +// through a scheduler pass. This is done as part of plan apply. 
It ensures that when a subsequent +// scheduler workers process a re-queued evaluation it sees any partial updates from the plan apply. +func (s *StateStore) updateEvalModifyIndex(txn *memdb.Txn, index uint64, evalID string) error { + // Lookup the evaluation + existing, err := txn.First("evals", "id", evalID) + if err != nil { + return fmt.Errorf("eval lookup failed: %v", err) + } + if existing == nil { + err := fmt.Errorf("unable to find eval id %q", evalID) + s.logger.Printf("[ERR] state_store: %v", err) + return err + } + eval := existing.(*structs.Evaluation).Copy() + // Update the indexes + eval.ModifyIndex = index + + // Insert the eval + if err := txn.Insert("evals", eval); err != nil { + return fmt.Errorf("eval insert failed: %v", err) + } + if err := txn.Insert("index", &IndexEntry{"evals", index}); err != nil { + return fmt.Errorf("index update failed: %v", err) + } + return nil +} + // DeleteEval is used to delete an evaluation func (s *StateStore) DeleteEval(index uint64, evals []string, allocs []string) error { txn := s.db.Txn(true) diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 38081ccfc..08179bdfd 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -100,40 +100,43 @@ func TestStateStore_UpsertPlanResults_AllocationsCreated_Denormalized(t *testing t.Fatalf("err: %v", err) } + eval := mock.Eval() + eval.JobID = job.ID + + // Create an eval + if err := state.UpsertEvals(1, []*structs.Evaluation{eval}); err != nil { + t.Fatalf("err: %v", err) + } + // Create a plan result res := structs.ApplyPlanResultsRequest{ AllocUpdateRequest: structs.AllocUpdateRequest{ Alloc: []*structs.Allocation{alloc}, Job: job, }, + EvalID: eval.ID, } - + assert := assert.New(t) err := state.UpsertPlanResults(1000, &res) - if err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(err) ws := memdb.NewWatchSet() out, err := state.AllocByID(ws, alloc.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if !reflect.DeepEqual(alloc, out) { - t.Fatalf("bad: %#v %#v", alloc, out) - } + assert.Nil(err) + assert.Equal(alloc, out) index, err := state.Index("allocs") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1000 { - t.Fatalf("bad: %d", index) - } + assert.Nil(err) + assert.EqualValues(1000, index) if watchFired(ws) { t.Fatalf("bad") } + + evalOut, err := state.EvalByID(ws, eval.ID) + assert.Nil(err) + assert.NotNil(evalOut) + assert.EqualValues(1000, evalOut.ModifyIndex) } // This test checks that the deployment is created and allocations count towards @@ -154,6 +157,14 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { t.Fatalf("err: %v", err) } + eval := mock.Eval() + eval.JobID = job.ID + + // Create an eval + if err := state.UpsertEvals(1, []*structs.Evaluation{eval}); err != nil { + t.Fatalf("err: %v", err) + } + // Create a plan result res := structs.ApplyPlanResultsRequest{ AllocUpdateRequest: structs.AllocUpdateRequest{ @@ -161,6 +172,7 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { Job: job, }, Deployment: d, + EvalID: eval.ID, } err := state.UpsertPlanResults(1000, &res) @@ -169,31 +181,24 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { } ws := memdb.NewWatchSet() + assert := assert.New(t) out, err := state.AllocByID(ws, alloc.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if !reflect.DeepEqual(alloc, out) { - t.Fatalf("bad: %#v %#v", alloc, out) - } + assert.Nil(err) + assert.Equal(alloc, out) dout, err := state.DeploymentByID(ws, d.ID) 
- if err != nil { - t.Fatalf("err: %v", err) - } - - if dout == nil { - t.Fatalf("bad: nil deployment") - } + assert.Nil(err) + assert.NotNil(dout) tg, ok := dout.TaskGroups[alloc.TaskGroup] - if !ok { - t.Fatalf("bad: nil deployment state") - } - if tg == nil || tg.PlacedAllocs != 2 { - t.Fatalf("bad: %v", dout) - } + assert.True(ok) + assert.NotNil(tg) + assert.Equal(2, tg.PlacedAllocs) + + evalOut, err := state.EvalByID(ws, eval.ID) + assert.Nil(err) + assert.NotNil(evalOut) + assert.EqualValues(1000, evalOut.ModifyIndex) if watchFired(ws) { t.Fatalf("bad") @@ -215,6 +220,7 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { Job: job, }, Deployment: d2, + EvalID: eval.ID, } err = state.UpsertPlanResults(1001, &res) @@ -223,21 +229,18 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { } dout, err = state.DeploymentByID(ws, d2.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if dout == nil { - t.Fatalf("bad: nil deployment") - } + assert.Nil(err) + assert.NotNil(dout) tg, ok = dout.TaskGroups[alloc.TaskGroup] - if !ok { - t.Fatalf("bad: nil deployment state") - } - if tg == nil || tg.PlacedAllocs != 2 { - t.Fatalf("bad: %v", dout) - } + assert.True(ok) + assert.NotNil(tg) + assert.Equal(2, tg.PlacedAllocs) + + evalOut, err = state.EvalByID(ws, eval.ID) + assert.Nil(err) + assert.NotNil(evalOut) + assert.EqualValues(1001, evalOut.ModifyIndex) } // This test checks that deployment updates are applied correctly @@ -258,6 +261,13 @@ func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { t.Fatalf("err: %v", err) } + eval := mock.Eval() + eval.JobID = job.ID + + // Create an eval + if err := state.UpsertEvals(1, []*structs.Evaluation{eval}); err != nil { + t.Fatalf("err: %v", err) + } alloc := mock.Alloc() alloc.Job = nil @@ -280,41 +290,37 @@ func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { }, Deployment: dnew, DeploymentUpdates: []*structs.DeploymentStatusUpdate{update}, + EvalID: eval.ID, } err := state.UpsertPlanResults(1000, &res) if err != nil { t.Fatalf("err: %v", err) } - + assert := assert.New(t) ws := memdb.NewWatchSet() // Check the deployments are correctly updated. 
dout, err := state.DeploymentByID(ws, dnew.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if dout == nil { - t.Fatalf("bad: nil deployment") - } + assert.Nil(err) + assert.NotNil(dout) tg, ok := dout.TaskGroups[alloc.TaskGroup] - if !ok { - t.Fatalf("bad: nil deployment state") - } - if tg == nil || tg.PlacedAllocs != 1 { - t.Fatalf("bad: %v", dout) - } + assert.True(ok) + assert.NotNil(tg) + assert.Equal(1, tg.PlacedAllocs) doutstandingout, err := state.DeploymentByID(ws, doutstanding.ID) - if err != nil || doutstandingout == nil { - t.Fatalf("bad: %v %v", err, doutstandingout) - } - if doutstandingout.Status != update.Status || doutstandingout.StatusDescription != update.StatusDescription || doutstandingout.ModifyIndex != 1000 { - t.Fatalf("bad: %v", doutstandingout) - } + assert.Nil(err) + assert.NotNil(doutstandingout) + assert.Equal(update.Status, doutstandingout.Status) + assert.Equal(update.StatusDescription, doutstandingout.StatusDescription) + assert.EqualValues(1000, doutstandingout.ModifyIndex) + evalOut, err := state.EvalByID(ws, eval.ID) + assert.Nil(err) + assert.NotNil(evalOut) + assert.EqualValues(1000, evalOut.ModifyIndex) if watchFired(ws) { t.Fatalf("bad") } diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index b8161bf3d..bf299ec92 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -514,6 +514,14 @@ type ApplyPlanResultsRequest struct { // deployments. This allows the scheduler to cancel any unneeded deployment // because the job is stopped or the update block is removed. DeploymentUpdates []*DeploymentStatusUpdate + + // EvalID is the eval ID of the plan being applied. The modify index of the + // evaluation is updated as part of applying the plan to ensure that subsequent + // scheduling events for the same job will wait for the index that last produced + // state changes. This is necessary for blocked evaluations since they can be + // processed many times, potentially making state updates, without the state of + // the evaluation itself being updated. + EvalID string } // AllocUpdateRequest is used to submit changes to allocations, either @@ -1245,7 +1253,7 @@ func DefaultResources() *Resources { // api/resources.go and should be kept in sync. func MinResources() *Resources { return &Resources{ - CPU: 100, + CPU: 20, MemoryMB: 10, IOPS: 0, } @@ -3123,8 +3131,8 @@ func (s *Service) Validate() error { } for _, c := range s.Checks { - if s.PortLabel == "" && c.RequiresPort() { - mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: check requires a port but the service %+q has no port", c.Name, s.Name)) + if s.PortLabel == "" && c.PortLabel == "" && c.RequiresPort() { + mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: check requires a port but neither check nor service %+q have a port", c.Name, s.Name)) continue } @@ -3569,8 +3577,16 @@ func validateServices(t *Task) error { } } + // Iterate over a sorted list of keys to make error listings stable + keys := make([]string, 0, len(servicePorts)) + for p := range servicePorts { + keys = append(keys, p) + } + sort.Strings(keys) + // Ensure all ports referenced in services exist. 
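A note on the loop change that follows: Go randomizes map iteration order, so building the error list by ranging over servicePorts directly made its ordering vary between runs, which is what the sort added above guards against. A self-contained sketch of the sort-the-keys idiom (the map contents here are made up):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Stand-in for servicePorts: port label -> names of services using it.
	servicePorts := map[string][]string{
		"http": {"web"},
		"db":   {"mysql"},
		"rpc":  {"api"},
	}

	// Ranging over the map directly yields a different order on every run;
	// collecting and sorting the keys first makes the output deterministic.
	keys := make([]string, 0, len(servicePorts))
	for p := range servicePorts {
		keys = append(keys, p)
	}
	sort.Strings(keys)

	for _, p := range keys {
		fmt.Printf("port %q used by %v\n", p, servicePorts[p])
	}
}

The production loop below applies the same idiom to the real servicePorts map.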
- for servicePort, services := range servicePorts { + for _, servicePort := range keys { + services := servicePorts[servicePort] _, ok := portLabels[servicePort] if !ok { names := make([]string, 0, len(services)) diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index 26b2d2a02..4897e0422 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -1235,55 +1235,93 @@ func TestTask_Validate_Service_Check(t *testing.T) { // TestTask_Validate_Service_Check_AddressMode asserts that checks do not // inherit address mode but do inherit ports. func TestTask_Validate_Service_Check_AddressMode(t *testing.T) { - task := &Task{ - Resources: &Resources{ - Networks: []*NetworkResource{ - { - DynamicPorts: []Port{ - { - Label: "http", - Value: 9999, + getTask := func(s *Service) *Task { + return &Task{ + Resources: &Resources{ + Networks: []*NetworkResource{ + { + DynamicPorts: []Port{ + { + Label: "http", + Value: 9999, + }, }, }, }, }, - }, - Services: []*Service{ - { + Services: []*Service{s}, + } + } + + cases := []struct { + Service *Service + ErrContains string + }{ + { + Service: &Service{ Name: "invalid-driver", PortLabel: "80", AddressMode: "host", }, - { - Name: "http-driver", + ErrContains: `port label "80" referenced`, + }, + { + Service: &Service{ + Name: "http-driver-fail-1", PortLabel: "80", AddressMode: "driver", Checks: []*ServiceCheck{ { - // Should fail Name: "invalid-check-1", Type: "tcp", Interval: time.Second, Timeout: time.Second, }, + }, + }, + ErrContains: `check "invalid-check-1" cannot use a numeric port`, + }, + { + Service: &Service{ + Name: "http-driver-fail-2", + PortLabel: "80", + AddressMode: "driver", + Checks: []*ServiceCheck{ { - // Should fail Name: "invalid-check-2", Type: "tcp", PortLabel: "80", Interval: time.Second, Timeout: time.Second, }, + }, + }, + ErrContains: `check "invalid-check-2" cannot use a numeric port`, + }, + { + Service: &Service{ + Name: "http-driver-fail-3", + PortLabel: "80", + AddressMode: "driver", + Checks: []*ServiceCheck{ { - // Should fail Name: "invalid-check-3", Type: "tcp", PortLabel: "missing-port-label", Interval: time.Second, Timeout: time.Second, }, + }, + }, + ErrContains: `port label "missing-port-label" referenced`, + }, + { + Service: &Service{ + Name: "http-driver-passes", + PortLabel: "80", + AddressMode: "driver", + Checks: []*ServiceCheck{ { - // Should pass Name: "valid-script-check", Type: "script", Command: "ok", @@ -1291,7 +1329,6 @@ func TestTask_Validate_Service_Check_AddressMode(t *testing.T) { Timeout: time.Second, }, { - // Should pass Name: "valid-host-check", Type: "tcp", PortLabel: "http", @@ -1299,7 +1336,6 @@ func TestTask_Validate_Service_Check_AddressMode(t *testing.T) { Timeout: time.Second, }, { - // Should pass Name: "valid-driver-check", Type: "tcp", AddressMode: "driver", @@ -1309,23 +1345,65 @@ func TestTask_Validate_Service_Check_AddressMode(t *testing.T) { }, }, }, - } - err := validateServices(task) - if err == nil { - t.Fatalf("expected errors but task validated successfully") - } - errs := err.(*multierror.Error).Errors - if expected := 4; len(errs) != expected { - for i, err := range errs { - t.Logf("errs[%d] -> %s", i, err) - } - t.Fatalf("expected %d errors but found %d", expected, len(errs)) + { + Service: &Service{ + Name: "empty-address-3673-passes-1", + Checks: []*ServiceCheck{ + { + Name: "valid-port-label", + Type: "tcp", + PortLabel: "http", + Interval: time.Second, + Timeout: time.Second, + }, + { + Name: "empty-is-ok", + Type: "script", + 
Command: "ok", + Interval: time.Second, + Timeout: time.Second, + }, + }, + }, + }, + { + Service: &Service{ + Name: "empty-address-3673-passes-2", + }, + }, + { + Service: &Service{ + Name: "empty-address-3673-fails", + Checks: []*ServiceCheck{ + { + Name: "empty-is-not-ok", + Type: "tcp", + Interval: time.Second, + Timeout: time.Second, + }, + }, + }, + ErrContains: `invalid: check requires a port but neither check nor service`, + }, } - assert.Contains(t, errs[0].Error(), `check "invalid-check-1" cannot use a numeric port`) - assert.Contains(t, errs[1].Error(), `check "invalid-check-2" cannot use a numeric port`) - assert.Contains(t, errs[2].Error(), `port label "80" referenced`) - assert.Contains(t, errs[3].Error(), `port label "missing-port-label" referenced`) + for _, tc := range cases { + tc := tc + task := getTask(tc.Service) + t.Run(tc.Service.Name, func(t *testing.T) { + err := validateServices(task) + if err == nil && tc.ErrContains == "" { + // Ok! + return + } + if err == nil { + t.Fatalf("no error returned. expected: %s", tc.ErrContains) + } + if !strings.Contains(err.Error(), tc.ErrContains) { + t.Fatalf("expected %q but found: %v", tc.ErrContains, err) + } + }) + } } func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) { diff --git a/nomad/worker_test.go b/nomad/worker_test.go index 627887e31..faa9cc104 100644 --- a/nomad/worker_test.go +++ b/nomad/worker_test.go @@ -341,6 +341,7 @@ func TestWorker_SubmitPlan(t *testing.T) { eval1 := mock.Eval() eval1.JobID = job.ID s1.fsm.State().UpsertJob(1000, job) + s1.fsm.State().UpsertEvals(1000, []*structs.Evaluation{eval1}) // Create the register request s1.evalBroker.Enqueue(eval1) diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go index 02296ff76..0e88b4255 100644 --- a/scheduler/generic_sched_test.go +++ b/scheduler/generic_sched_test.go @@ -35,8 +35,11 @@ func TestServiceSched_JobRegister(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -118,7 +121,9 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation if err := h.Process(NewServiceScheduler, eval); err != nil { @@ -149,7 +154,9 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) h1 := NewHarnessWithState(t, h.State) if err := h1.Process(NewServiceScheduler, eval); err != nil { t.Fatalf("err: %v", err) @@ -206,8 +213,11 @@ func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -275,8 +285,11 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: 
structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -364,8 +377,11 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -456,7 +472,9 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -548,7 +566,9 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation assert.Nil(h.Process(NewServiceScheduler, eval), "Process") @@ -602,7 +622,9 @@ func TestServiceSched_JobRegister_Annotate(t *testing.T) { TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, AnnotatePlan: true, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -679,7 +701,9 @@ func TestServiceSched_JobRegister_CountZero(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -720,8 +744,11 @@ func TestServiceSched_JobRegister_AllocFail(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -802,8 +829,11 @@ func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -899,8 +929,9 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } - + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -1016,8 +1047,11 @@ func TestServiceSched_Plan_Partial_Progress(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -1245,7 +1279,9 @@ func 
TestServiceSched_JobModify(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1329,7 +1365,9 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1436,7 +1474,9 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1530,7 +1570,9 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1635,7 +1677,9 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1736,7 +1780,9 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1844,7 +1890,9 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1958,7 +2006,9 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2044,7 +2094,9 @@ func TestServiceSched_JobDeregister_Purged(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobDeregister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2111,7 +2163,9 @@ func TestServiceSched_JobDeregister_Stopped(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobDeregister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2201,7 +2255,9 @@ func TestServiceSched_NodeDown(t *testing.T) { TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, NodeID: node.ID, + Status: 
structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2267,7 +2323,9 @@ func TestServiceSched_NodeUpdate(t *testing.T) { TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, NodeID: node.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2318,7 +2376,9 @@ func TestServiceSched_NodeDrain(t *testing.T) { TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, NodeID: node.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2419,8 +2479,11 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, NodeID: node.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -2493,7 +2556,9 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, NodeID: node.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2550,7 +2615,9 @@ func TestServiceSched_NodeDrain_UpdateStrategy(t *testing.T) { TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, NodeID: node.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2608,7 +2675,9 @@ func TestServiceSched_RetryLimit(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2664,7 +2733,9 @@ func TestBatchSched_Run_CompleteAlloc(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -2719,7 +2790,9 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -2781,7 +2854,9 @@ func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -2841,7 +2916,9 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), 
[]*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -2904,7 +2981,9 @@ func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -2985,7 +3064,9 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewBatchScheduler, eval) @@ -3039,8 +3120,11 @@ func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + // Process the evaluation err := h.Process(NewBatchScheduler, eval) if err != nil { @@ -3102,8 +3186,11 @@ func TestBatchSched_NodeDrain_Complete(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + // Process the evaluation err := h.Process(NewBatchScheduler, eval) if err != nil { @@ -3154,8 +3241,11 @@ func TestBatchSched_ScaleDown_SameName(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + // Process the evaluation err := h.Process(NewBatchScheduler, eval) if err != nil { @@ -3197,7 +3287,9 @@ func TestGenericSched_ChainedAlloc(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation if err := h.Process(NewServiceScheduler, eval); err != nil { t.Fatalf("err: %v", err) @@ -3226,7 +3318,10 @@ func TestGenericSched_ChainedAlloc(t *testing.T) { Priority: job1.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job1.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1})) + // Process the evaluation if err := h1.Process(NewServiceScheduler, eval1); err != nil { t.Fatalf("err: %v", err) @@ -3287,8 +3382,11 @@ func TestServiceSched_NodeDrain_Sticky(t *testing.T) { TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: alloc.Job.ID, NodeID: node.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -3344,8 +3442,11 @@ func TestServiceSched_CancelDeployment_Stopped(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobDeregister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -3413,8 +3514,11 @@ func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + 
Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { diff --git a/scheduler/system_sched_test.go b/scheduler/system_sched_test.go index 8fe30b6cc..8947d2c5d 100644 --- a/scheduler/system_sched_test.go +++ b/scheduler/system_sched_test.go @@ -32,7 +32,9 @@ func TestSystemSched_JobRegister(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) @@ -105,7 +107,9 @@ func TestSystemeSched_JobRegister_StickyAllocs(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation if err := h.Process(NewSystemScheduler, eval); err != nil { @@ -134,7 +138,9 @@ func TestSystemeSched_JobRegister_StickyAllocs(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) h1 := NewHarnessWithState(t, h.State) if err := h1.Process(NewSystemScheduler, eval); err != nil { t.Fatalf("err: %v", err) @@ -181,7 +187,9 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation if err := h.Process(NewSystemScheduler, eval); err != nil { @@ -207,7 +215,9 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) { Priority: job1.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job1.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation if err := h1.Process(NewSystemScheduler, eval1); err != nil { @@ -241,8 +251,9 @@ func TestSystemSched_ExhaustResources(t *testing.T) { Priority: svcJob.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: svcJob.ID, + Status: structs.EvalStatusPending, } - + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -260,8 +271,9 @@ func TestSystemSched_ExhaustResources(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } - + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation if err := h.Process(NewSystemScheduler, eval1); err != nil { t.Fatalf("err: %v", err) @@ -307,7 +319,9 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) { TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, AnnotatePlan: true, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) @@ -405,8 +419,9 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, + Status: structs.EvalStatusPending, } - + noErr(t, 
h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) if err != nil { @@ -472,8 +487,9 @@ func TestSystemSched_JobRegister_AllocFail(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } - + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) if err != nil { @@ -542,7 +558,9 @@ func TestSystemSched_JobModify(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) @@ -633,8 +651,9 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } - + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) if err != nil { @@ -728,7 +747,9 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) @@ -822,7 +843,9 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobDeregister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) @@ -894,7 +917,9 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) { Priority: 50, TriggeredBy: structs.EvalTriggerJobDeregister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) @@ -956,7 +981,9 @@ func TestSystemSched_NodeDown(t *testing.T) { TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, NodeID: node.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) @@ -1021,7 +1048,9 @@ func TestSystemSched_NodeDrain_Down(t *testing.T) { TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, NodeID: node.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1080,7 +1109,9 @@ func TestSystemSched_NodeDrain(t *testing.T) { TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, NodeID: node.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) @@ -1143,7 +1174,9 @@ func TestSystemSched_NodeUpdate(t *testing.T) { TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, NodeID: node.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) @@ -1180,7 +1213,9 @@ func 
TestSystemSched_RetryLimit(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) @@ -1230,7 +1265,9 @@ func TestSystemSched_Queued_With_Constraints(t *testing.T) { TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, NodeID: node.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) @@ -1264,7 +1301,9 @@ func TestSystemSched_ChainedAlloc(t *testing.T) { Priority: job.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation if err := h.Process(NewSystemScheduler, eval); err != nil { t.Fatalf("err: %v", err) @@ -1299,7 +1338,9 @@ func TestSystemSched_ChainedAlloc(t *testing.T) { Priority: job1.Priority, TriggeredBy: structs.EvalTriggerJobRegister, JobID: job1.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1})) // Process the evaluation if err := h1.Process(NewSystemScheduler, eval1); err != nil { t.Fatalf("err: %v", err) @@ -1389,7 +1430,9 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) { TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, NodeID: node.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) @@ -1460,7 +1503,9 @@ func TestSystemSched_QueuedAllocsMultTG(t *testing.T) { TriggeredBy: structs.EvalTriggerNodeUpdate, JobID: job.ID, NodeID: node.ID, + Status: structs.EvalStatusPending, } + noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewSystemScheduler, eval) diff --git a/scheduler/testing.go b/scheduler/testing.go index fb631d444..a04b99ce8 100644 --- a/scheduler/testing.go +++ b/scheduler/testing.go @@ -122,6 +122,7 @@ func (h *Harness) SubmitPlan(plan *structs.Plan) (*structs.PlanResult, State, er }, Deployment: plan.Deployment, DeploymentUpdates: plan.DeploymentUpdates, + EvalID: plan.EvalID, } // Apply the full plan diff --git a/scripts/travis.sh b/scripts/travis.sh index d17aa91e6..8e500f793 100755 --- a/scripts/travis.sh +++ b/scripts/travis.sh @@ -9,7 +9,7 @@ trap 'kill ${PING_LOOP_PID}' EXIT HUP INT QUIT TERM if [ "$RUN_STATIC_CHECKS" ]; then make check - if [ "$TRAVIS_OS_NAME" == "linux" ]; then + if [ "$TRAVIS_OS_NAME" == "linux" ]; then make checkscripts fi fi diff --git a/ui/.nvmrc b/ui/.nvmrc new file mode 100644 index 000000000..1e8b31496 --- /dev/null +++ b/ui/.nvmrc @@ -0,0 +1 @@ +6 diff --git a/version/version.go b/version/version.go index d9068db05..6e4afe5c0 100644 --- a/version/version.go +++ b/version/version.go @@ -11,12 +11,12 @@ var ( GitDescribe string // The main version number that is being run at the moment. - Version = "0.7.1" + Version = "0.8.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. 
- VersionPrerelease = "rc1" + VersionPrerelease = "dev" // VersionMetadata is metadata further describing the build type. VersionMetadata = "" diff --git a/website/config.rb b/website/config.rb index fe55d2bc4..46bdc3be8 100644 --- a/website/config.rb +++ b/website/config.rb @@ -2,7 +2,7 @@ set :base_url, "https://www.nomadproject.io/" activate :hashicorp do |h| h.name = "nomad" - h.version = "0.7.0" + h.version = "0.7.1" h.github_slug = "hashicorp/nomad" end diff --git a/website/source/api/json-jobs.html.md b/website/source/api/json-jobs.html.md index 0090a7feb..1fe359af9 100644 --- a/website/source/api/json-jobs.html.md +++ b/website/source/api/json-jobs.html.md @@ -300,13 +300,17 @@ The `Task` object supports the following keys: } ``` +- `KillSignal` - Specifies a configurable kill signal for a task, where the + default is SIGINT. Note that this is only supported for drivers which accept + sending signals (currently `docker`, `exec`, `raw_exec`, and `java` drivers). + - `KillTimeout` - `KillTimeout` is a time duration in nanoseconds. It can be used to configure the time between signaling a task it will be killed and actually killing it. Drivers first sends a task the `SIGINT` signal and then sends `SIGTERM` if the task doesn't die after the `KillTimeout` duration has elapsed. The default `KillTimeout` is 5 seconds. -- `leader` - Specifies whether the task is the leader task of the task group. If +- `Leader` - Specifies whether the task is the leader task of the task group. If set to true, when the leader task completes, all other tasks within the task group will be gracefully shutdown. @@ -346,6 +350,23 @@ The `Task` object supports the following keys: defined in the resources block. This could be a label of either a dynamic or a static port. + - `AddressMode`: Specifies what address (host or driver-specific) this + service should advertise. This setting is supported in Docker since + Nomad 0.6 and rkt since Nomad 0.7. Valid options are: + + - `auto` - Allows the driver to determine whether the host or driver + address should be used. Defaults to `host` and only implemented by + Docker. If you use a Docker network plugin such as weave, Docker will + automatically use its address. + + - `driver` - Use the IP specified by the driver, and the port specified + in a port map. A numeric port may be specified since port maps aren't + required by all network plugins. Useful for advertising SDN and + overlay network addresses. Task will fail if driver network cannot be + determined. Only implemented for Docker and rkt. + + - `host` - Use the host IP and port. + - `Checks`: `Checks` is an array of check objects. A check object defines a health check associated with the service. Nomad supports the `script`, `http` and `tcp` Consul Checks. Script checks are not supported for the @@ -357,6 +378,25 @@ The `Task` object supports the following keys: - `Name`: The name of the health check. + - `AddressMode`: Same as `AddressMode` on `Service`. Unlike services, + checks do not have an `auto` address mode as there's no way for + Nomad to know which is the best address to use for checks. Consul + needs access to the address for any HTTP or TCP checks. Added in + Nomad 0.7.1. Unlike `PortLabel`, this setting is *not* inherited + from the `Service`. + + - `PortLabel`: Specifies the label of the port on which the check will + be performed. Note this is the _label_ of the port and not the port + number unless `AddressMode: "driver"`. The port label must match one + defined in the Network stanza. 
If a port value was declared on the + `Service`, this will inherit from that value if not supplied. If + supplied, this value takes precedence over the `Service.PortLabel` + value. This is useful for services which operate on multiple ports. + `http` and `tcp` checks require a port while `script` checks do not. + Checks will use the host IP and ports by default. In Nomad 0.7.1 or + later numeric ports may be used if `AddressMode: "driver"` is set on + the check. + - `Header`: Headers for HTTP checks. Should be an object where the values are an array of values. Headers will be written once for each value. diff --git a/website/source/docs/commands/_general_options.html.md b/website/source/docs/commands/_general_options.html.md index a3d95dfb6..5a1811f9b 100644 --- a/website/source/docs/commands/_general_options.html.md +++ b/website/source/docs/commands/_general_options.html.md @@ -26,3 +26,6 @@ - `-tls-skip-verify`: Do not verify TLS certificate. This is highly not recommended. Verification will also be skipped if `NOMAD_SKIP_VERIFY` is set. + +- `-token`: The SecretID of an ACL token to use to authenticate API requests with. + Overrides the `NOMAD_TOKEN` environment variable if set. diff --git a/website/source/docs/drivers/docker.html.md b/website/source/docs/drivers/docker.html.md index 584e68a5b..4ea260386 100644 --- a/website/source/docs/drivers/docker.html.md +++ b/website/source/docs/drivers/docker.html.md @@ -328,6 +328,7 @@ The `docker` driver supports the following configuration in the job spec. Only ] } ``` + ### Container Name Nomad creates a container after pulling an image. Containers are named diff --git a/website/source/docs/drivers/rkt.html.md b/website/source/docs/drivers/rkt.html.md index 33d44f648..b369efb12 100644 --- a/website/source/docs/drivers/rkt.html.md +++ b/website/source/docs/drivers/rkt.html.md @@ -10,7 +10,7 @@ description: |- Name: `rkt` -The `rkt` driver provides an interface for using CoreOS rkt for running +The `rkt` driver provides an interface for using rkt for running application containers. ## Task Configuration diff --git a/website/source/docs/job-specification/service.html.md b/website/source/docs/job-specification/service.html.md index 9707d8ec4..578f0c1ed 100644 --- a/website/source/docs/job-specification/service.html.md +++ b/website/source/docs/job-specification/service.html.md @@ -96,7 +96,7 @@ does not automatically enable service discovery. interpolated and revalidated. This can cause certain service names to pass validation at submit time but fail at runtime. -- `port` `(string: )` - Specifies the label of the port on which this +- `port` `(string: )` - Specifies the label of the port on which this service is running. Note this is the _label_ of the port and not the port number unless `address_mode = driver`. The port label must match one defined in the [`network`][network] stanza unless you're also using @@ -174,14 +174,15 @@ scripts. add the IP of the service and the port, so this is just the relative URL to the health check endpoint. This is required for http-based health checks. -- `port` `(string: )` - Specifies the label of the port on which the +- `port` `(string: )` - Specifies the label of the port on which the check will be performed. Note this is the _label_ of the port and not the port number unless `address_mode = driver`. The port label must match one defined in the [`network`][network] stanza. If a port value was declared on the `service`, this will inherit from that value if not supplied. 
If supplied, this value takes precedence over the `service.port` value. This is useful for - services which operate on multiple ports. Checks will use the host IP and - ports by default. In Nomad 0.7.1 or later numeric ports may be used if + services which operate on multiple ports. `http` and `tcp` checks require a + port while `script` checks do not. Checks will use the host IP and ports by + default. In Nomad 0.7.1 or later numeric ports may be used if `address_mode="driver"` is set on the check. - `protocol` `(string: "http")` - Specifies the protocol for the http-based diff --git a/website/source/docs/job-specification/task.html.md b/website/source/docs/job-specification/task.html.md index f23406297..f5bb18897 100644 --- a/website/source/docs/job-specification/task.html.md +++ b/website/source/docs/job-specification/task.html.md @@ -56,8 +56,7 @@ job "docs" { - `kill_signal` `(string)` - Specifies a configurable kill signal for a task, where the default is SIGINT. Note that this is only supported for drivers - which accept sending signals (currently Docker, exec, raw_exec, and Java - drivers). + which accept sending signals (currently `docker`, `exec`, `raw_exec`, and `java` drivers). - `leader` `(bool: false)` - Specifies whether the task is the leader task of the task group. If set to true, when the leader task completes, all other diff --git a/website/source/docs/job-specification/template.html.md b/website/source/docs/job-specification/template.html.md index 0c7ef6021..2e3a92275 100644 --- a/website/source/docs/job-specification/template.html.md +++ b/website/source/docs/job-specification/template.html.md @@ -191,7 +191,7 @@ template { # Empty lines are also ignored LOG_LEVEL="{{key "service/geo-api/log-verbosity"}}" -API_KEY="{{with secret "secret/geo-api-key"}}{{.Data.key}}{{end}}" +API_KEY="{{with secret "secret/geo-api-key"}}{{.Data.value}}{{end}}" EOH destination = "secrets/file.env" diff --git a/website/source/downloads.html.erb b/website/source/downloads.html.erb index d3d994e5b..4156f48ae 100644 --- a/website/source/downloads.html.erb +++ b/website/source/downloads.html.erb @@ -31,10 +31,6 @@ description: |-

Check out the v<%= latest_version %> CHANGELOG for information on the latest release.

-

Nomad 0.7.1 RC1

-

- A release candidate of Nomad 0.7.1 is also available! The RC can be downloaded on the Nomad releases page. -
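The json-jobs and task documentation changes above describe how `KillSignal`, `Leader`, service `AddressMode`, and the check-level `AddressMode`/`PortLabel` fit together. As a rough illustration only (not part of the commit), here is a minimal Go sketch that assembles a task fragment using just the keys documented above; the names, port values, and durations are placeholders, and the surrounding job envelope is omitted.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical task fragment built only from the keys documented above.
	// Durations follow the API convention of nanoseconds.
	task := map[string]interface{}{
		"Name":       "web",
		"Driver":     "docker",
		"KillSignal": "SIGTERM", // defaults to SIGINT when unset
		"Leader":     true,      // when this task completes, the rest of the group is shut down
		"Services": []interface{}{
			map[string]interface{}{
				"Name":        "web",
				"PortLabel":   "http",
				"AddressMode": "driver", // advertise the driver/overlay address (Docker, rkt)
				"Checks": []interface{}{
					map[string]interface{}{
						"Name":        "web-tcp",
						"Type":        "tcp",
						"AddressMode": "driver", // not inherited from the service
						"PortLabel":   "8080",   // numeric port allowed with driver mode (0.7.1+)
						"Interval":    10000000000,
						"Timeout":     2000000000,
					},
				},
			},
		},
	}

	out, _ := json.MarshalIndent(task, "", "  ")
	fmt.Println(string(out))
}
```

Printing the fragment this way makes it easy to compare against the key descriptions above before embedding it in a full job payload.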

diff --git a/website/source/guides/securing-nomad.html.md b/website/source/guides/securing-nomad.html.md index 182c04e41..d4898f3dd 100644 --- a/website/source/guides/securing-nomad.html.md +++ b/website/source/guides/securing-nomad.html.md @@ -113,7 +113,7 @@ $ echo '{}' | cfssl gencert -ca=nomad-ca.pem -ca-key=nomad-ca-key.pem -config=cf -hostname="client.global.nomad,localhost,127.0.0.1" - | cfssljson -bare client # Generate a certificate for the CLI -$ echo '{}' | cfssl gencert -ca nomad-ca.pem -ca-key nomad-ca-key.pem -profile=client \ +$ echo '{}' | cfssl gencert -ca=nomad-ca.pem -ca-key=nomad-ca-key.pem -profile=client \ - | cfssljson -bare cli ``` @@ -433,7 +433,7 @@ TTL. ## Changing Nomad certificates on the fly -As of 0.7.1, Nomad supports dynamic certificate reloading via SIHUP. +As of 0.7.1, Nomad supports dynamic certificate reloading via SIGHUP. Given a prior TLS configuration as follows: diff --git a/website/source/guides/spark/configuration.html.md b/website/source/guides/spark/configuration.html.md index e35d7623d..929861be9 100644 --- a/website/source/guides/spark/configuration.html.md +++ b/website/source/guides/spark/configuration.html.md @@ -13,6 +13,14 @@ are generally applicable to the Nomad integration. The properties listed below are specific to running Spark on Nomad. Configuration properties can be set by adding `--conf [property]=[value]` to the `spark-submit` command. +- `spark.nomad.authToken` `(string: nil)` - Specifies the secret key of the auth +token to use when accessing the API. This falls back to the NOMAD_TOKEN environment +variable. Note that if this configuration setting is set and the cluster deploy +mode is used, this setting will be propagated to the driver application in the +job spec. If it is not set and an auth token is taken from the NOMAD_TOKEN +environment variable, the token will not be propagated to the driver which will +require the driver to pick up its token from an environment variable. + - `spark.nomad.cluster.expectImmediateScheduling` `(bool: false)` - Specifies that `spark-submit` should fail if Nomad is not able to schedule the job immediately. @@ -101,12 +109,14 @@ time that Nomad should wait before retrying executor task groups upon failure. - `spark.nomad.executor.retryInterval` `(string: "1d")` - Specifies Nomad's retry interval for executor task groups. -- `spark.nomad.job` `(string: nil)` - Specifies the Nomad job name. - - `spark.nomad.job.template` `(string: nil)` - Specifies the path to a JSON file containing a Nomad job to use as a template. This can also be set with `spark-submit's --nomad-template` parameter. +- `spark.nomad.namespace` `(string: nil)` - Specifies the namespace to use. This +falls back first to the NOMAD_NAMESPACE environment variable and then to Nomad's +default namespace. + - `spark.nomad.priority` `(string: nil)` - Specifies the priority for the Nomad job. diff --git a/website/source/resources.html.erb b/website/source/resources.html.erb index af6953134..06e43b96b 100644 --- a/website/source/resources.html.erb +++ b/website/source/resources.html.erb @@ -19,7 +19,7 @@ description: |-
  • How Jet.com uses HashiCorp Nomad on Azure to run its applications
  • End to end production Nomad at Citadel
  • HashiCorp Nomad demo and Citadel use case
  • -
  • Verizon Nelson: Multi-region Container Orchestration for Hashicorp Nomad and Lyft Envoy
  • +
  • Nelson: Multi-region Container Orchestration for Hashicorp Nomad and Lyft Envoy
  • Elsevier's Container Framework with Nomad, Terraform, and Consul
  • PagerDuty's Nomadic Journey
  • Operating jobs at scale with Nomad
  • @@ -29,68 +29,68 @@ description: |-

    Blog Posts

    Tools for Provisioning and Experimentation

    Integrations

    Other

    Trusted By

      -
    • Barclays
    • -
    • Bownty.com
    • -
    • CircleCI
    • -
    • Citadel
    • -
    • Deluxe Entertainment
    • -
    • Elsevier
    • -
    • Jet.com
    • -
    • Pagerduty
    • -
    • SAP Ariba
    • -
    • SeatGeek
    • -
    • Spaceflight Industries
    • -
    • SpotInst
    • -
    • UnderArmour
    • -
    • Verizon
    • +
    • Barclays
    • +
    • Bownty.com
    • +
    • CircleCI
    • +
    • Citadel
    • +
    • Deluxe Entertainment
    • +
    • Elsevier
    • +
    • Jet.com
    • +
    • Pagerduty
    • +
    • SAP Ariba
    • +
    • SeatGeek
    • +
    • Spaceflight Industries
    • +
    • SpotInst
    • +
    • Under Armour

    - If you would like to have a resource or your company name added to this page, please - submit a Pull Request. + If you would like to have a resource or your company name added to this page, please + submit a Pull Request.
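Stepping back to the scheduler test changes earlier in this diff: every touched test now creates its mock evaluation in the pending state and upserts it into the harness state store before calling `Process`, alongside the new `EvalID` plumbing in `scheduler/testing.go`. The condensed sketch below is only an illustration of that pattern, assuming the package's existing helpers (`NewHarness`, `noErr`, `mock.SystemJob`) and a UUID helper for the `ID` field, which the hunks above elide.

```go
package scheduler

import (
	"testing"

	"github.com/hashicorp/nomad/helper/uuid" // assumed ID helper; the hunks above elide the ID field
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
)

func TestExampleSched_PendingEvalPattern(t *testing.T) {
	h := NewHarness(t)

	// Register a job for the evaluation to act on.
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create the mock evaluation in the pending state and persist it before
	// processing, so the scheduler sees it the same way the eval broker
	// would hand it over.
	eval := &structs.Evaluation{
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation exactly as the existing tests do.
	noErr(t, h.Process(NewSystemScheduler, eval))

	// The harness records the submitted plan for assertions.
	if len(h.Plans) != 1 {
		t.Fatalf("expected one plan, got %d", len(h.Plans))
	}
}
```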