From 2c44cbb001bdd569cf2719323fd2f2b18df43b6d Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Tue, 11 Apr 2023 08:45:08 -0500 Subject: [PATCH] api: enable support for setting original job source (#16763) * api: enable support for setting original source alongside job This PR adds support for setting job source material along with the registration of a job. This includes a new HTTP endpoint and a new RPC endpoint for making queries for the original source of a job. The HTTP endpoint is /v1/job//submission?version= and the RPC method is Job.GetJobSubmission. The job source (if submitted, and doing so is always optional), is stored in the job_submission memdb table, separately from the actual job. This way we do not incur overhead of reading the large string field throughout normal job operations. The server config now includes job_max_source_size for configuring the maximum size the job source may be, before the server simply drops the source material. This should help prevent Bad Things from happening when huge jobs are submitted. If the value is set to 0, all job source material will be dropped. * api: avoid writing var content to disk for parsing * api: move submission validation into RPC layer * api: return an error if updating a job submission without namespace or job id * api: be exact about the job index we associate a submission with (modify) * api: reword api docs scheduling * api: prune all but the last 6 job submissions * api: protect against nil job submission in job validation * api: set max job source size in test server * api: fixups from pr --- .changelog/16763.txt | 3 + api/jobs.go | 88 +++++- api/jobs_test.go | 295 ++++++++++++++++++ api/util_test.go | 4 +- client/client_test.go | 8 +- client/gc_test.go | 2 +- command/agent/agent.go | 16 +- command/agent/agent_test.go | 33 +- command/agent/config.go | 11 +- command/agent/deployment_endpoint_test.go | 10 +- command/agent/fs_endpoint_test.go | 2 +- command/agent/job_endpoint.go | 67 +++- command/agent/job_endpoint_test.go | 121 +++++-- command/agent/search_endpoint_test.go | 4 +- command/helpers.go | 92 ++++-- command/helpers_test.go | 41 ++- command/job_allocs_test.go | 8 +- command/job_deployments_test.go | 8 +- command/job_dispatch_test.go | 6 +- command/job_eval_test.go | 6 +- command/job_history_test.go | 4 +- command/job_inspect_test.go | 4 +- command/job_periodic_force_test.go | 4 +- command/job_plan.go | 2 +- command/job_promote_test.go | 4 +- command/job_revert_test.go | 6 +- command/job_run.go | 3 +- command/job_scale_test.go | 2 +- command/job_scaling_events_test.go | 2 +- command/job_status_test.go | 6 +- command/job_stop_test.go | 4 +- command/job_validate.go | 2 +- command/status_test.go | 10 +- jobspec2/parse.go | 17 +- nomad/client_alloc_endpoint_test.go | 28 +- nomad/client_fs_endpoint_test.go | 40 +-- nomad/config.go | 5 + nomad/core_sched_test.go | 38 +-- nomad/deployment_endpoint_test.go | 50 +-- .../deployments_watcher_test.go | 62 ++-- nomad/deploymentwatcher/testutil_test.go | 2 +- nomad/drainer/draining_node_test.go | 16 +- nomad/drainer/watch_jobs_test.go | 8 +- nomad/drainer/watch_nodes_test.go | 4 +- nomad/eval_endpoint_test.go | 4 +- nomad/fsm.go | 4 +- nomad/fsm_test.go | 26 +- nomad/job_endpoint.go | 54 +++- nomad/job_endpoint_hooks.go | 29 ++ nomad/job_endpoint_hooks_test.go | 37 +++ nomad/job_endpoint_test.go | 292 ++++++++++++++--- nomad/mock/mock.go | 44 +++ nomad/namespace_endpoint_test.go | 4 +- nomad/node_endpoint_test.go | 26 +- nomad/operator_endpoint_test.go | 2 +- 
nomad/periodic_endpoint_test.go | 6 +- nomad/periodic_test.go | 10 +- nomad/scaling_endpoint_test.go | 4 +- nomad/search_endpoint_test.go | 20 +- nomad/service_registration_endpoint_test.go | 4 +- nomad/state/deployment_events_test.go | 2 +- nomad/state/events_test.go | 10 +- nomad/state/schema.go | 49 ++- nomad/state/state_store.go | 121 ++++++- nomad/state/state_store_test.go | 269 ++++++++++------ nomad/structs/structs.go | 58 ++++ nomad/system_endpoint_test.go | 4 +- nomad/testing.go | 3 + nomad/volumewatcher/volumes_watcher_test.go | 2 +- nomad/worker_test.go | 6 +- scheduler/benchmarks/benchmarks_test.go | 2 +- scheduler/feasible_test.go | 2 +- scheduler/generic_sched_test.go | 182 +++++------ scheduler/preemption_test.go | 4 +- scheduler/scheduler_sysbatch_test.go | 54 ++-- scheduler/scheduler_system_test.go | 78 ++--- scheduler/spread_test.go | 6 +- website/content/api-docs/jobs.mdx | 70 ++++- website/content/docs/configuration/server.mdx | 5 + 79 files changed, 1987 insertions(+), 654 deletions(-) create mode 100644 .changelog/16763.txt diff --git a/.changelog/16763.txt b/.changelog/16763.txt new file mode 100644 index 000000000..0b1020907 --- /dev/null +++ b/.changelog/16763.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: enable support for storing original job source +``` diff --git a/api/jobs.go b/api/jobs.go index c99a981ef..64b25c710 100644 --- a/api/jobs.go +++ b/api/jobs.go @@ -12,6 +12,7 @@ import ( "time" "github.com/hashicorp/cronexpr" + "golang.org/x/exp/maps" ) const ( @@ -71,6 +72,11 @@ type JobsParseRequest struct { // HCLv1 indicates whether the JobHCL should be parsed with the hcl v1 parser HCLv1 bool `json:"hclv1,omitempty"` + // Variables are HCL2 variables associated with the job. Only works with hcl2. + // + // Interpreted as if it were the content of a variables file. + Variables string + // Canonicalize is a flag as to if the server should return default values // for unset fields Canonicalize bool @@ -81,7 +87,7 @@ func (c *Client) Jobs() *Jobs { return &Jobs{client: c} } -// ParseHCL is used to convert the HCL repesentation of a Job to JSON server side. +// ParseHCL is used to convert the HCL representation of a Job to JSON server side. // To parse the HCL client side see package github.com/hashicorp/nomad/jobspec // Use ParseHCLOpts if you need to customize JobsParseRequest. func (j *Jobs) ParseHCL(jobHCL string, canonicalize bool) (*Job, error) { @@ -92,10 +98,8 @@ func (j *Jobs) ParseHCL(jobHCL string, canonicalize bool) (*Job, error) { return j.ParseHCLOpts(req) } -// ParseHCLOpts is used to convert the HCL representation of a Job to JSON -// server side. To parse the HCL client side see package -// github.com/hashicorp/nomad/jobspec. -// ParseHCL is an alternative convenience API for HCLv2 users. +// ParseHCLOpts is used to request the server convert the HCL representation of a +// Job to JSON on our behalf. Accepts HCL1 or HCL2 jobs as input. func (j *Jobs) ParseHCLOpts(req *JobsParseRequest) (*Job, error) { var job Job _, err := j.client.put("/v1/jobs/parse", req, &job, nil) @@ -119,6 +123,7 @@ type RegisterOptions struct { PolicyOverride bool PreserveCounts bool EvalPriority int + Submission *JobSubmission } // Register is used to register a new job. It returns the ID @@ -137,9 +142,7 @@ func (j *Jobs) EnforceRegister(job *Job, modifyIndex uint64, q *WriteOptions) (* // returns the ID of the evaluation, along with any errors encountered. 
func (j *Jobs) RegisterOpts(job *Job, opts *RegisterOptions, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) { // Format the request - req := &JobRegisterRequest{ - Job: job, - } + req := &JobRegisterRequest{Job: job} if opts != nil { if opts.EnforceIndex { req.EnforceIndex = true @@ -148,6 +151,7 @@ func (j *Jobs) RegisterOpts(job *Job, opts *RegisterOptions, q *WriteOptions) (* req.PolicyOverride = opts.PolicyOverride req.PreserveCounts = opts.PreserveCounts req.EvalPriority = opts.EvalPriority + req.Submission = opts.Submission } var resp JobRegisterResponse @@ -255,6 +259,19 @@ func (j *Jobs) Versions(jobID string, diffs bool, q *QueryOptions) ([]*Job, []*J return resp.Versions, resp.Diffs, qm, nil } +// Submission is used to retrieve the original submitted source of a job given its +// namespace, jobID, and version number. The original source might not be available, +// which case nil is returned with no error. +func (j *Jobs) Submission(jobID string, version int, q *QueryOptions) (*JobSubmission, *QueryMeta, error) { + var sub JobSubmission + s := fmt.Sprintf("/v1/job/%s/submission?version=%d", url.PathEscape(jobID), version) + qm, err := j.client.query(s, &sub, q) + if err != nil { + return nil, nil, err + } + return &sub, qm, nil +} + // Allocations is used to return the allocs for a given job ID. func (j *Jobs) Allocations(jobID string, allAllocs bool, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) { var resp []*AllocationListStub @@ -866,6 +883,51 @@ type ParameterizedJobConfig struct { MetaOptional []string `mapstructure:"meta_optional" hcl:"meta_optional,optional"` } +// JobSubmission is used to hold information about the original content of a job +// specification being submitted to Nomad. +// +// At any time a JobSubmission may be nil, indicating no information is known about +// the job submission. +type JobSubmission struct { + // Source contains the original job definition (may be in the format of + // hcl1, hcl2, or json). + Source string + + // Format indicates what the Source content was (hcl1, hcl2, or json). + Format string + + // VariableFlags contains the CLI "-var" flag arguments as submitted with the + // job (hcl2 only). + VariableFlags map[string]string + + // Variables contains the opaque variables configuration as coming from + // a var-file or the WebUI variables input (hcl2 only). + Variables string +} + +func (js *JobSubmission) Canonicalize() { + if js == nil { + return + } + + if len(js.VariableFlags) == 0 { + js.VariableFlags = nil + } +} + +func (js *JobSubmission) Copy() *JobSubmission { + if js == nil { + return nil + } + + return &JobSubmission{ + Source: js.Source, + Format: js.Format, + VariableFlags: maps.Clone(js.VariableFlags), + Variables: js.Variables, + } +} + // Job is used to serialize a job. type Job struct { /* Fields parsed from HCL config */ @@ -1251,7 +1313,9 @@ type JobRevertRequest struct { // JobRegisterRequest is used to update a job type JobRegisterRequest struct { - Job *Job + Submission *JobSubmission + Job *Job + // If EnforceIndex is set then the job will only be registered if the passed // JobModifyIndex matches the current Jobs index. If the index is zero, the // register only occurs if the job is new. @@ -1389,6 +1453,12 @@ type JobVersionsResponse struct { QueryMeta } +// JobSubmissionResponse is used for a job get submission request +type JobSubmissionResponse struct { + Submission *JobSubmission + QueryMeta +} + // JobStabilityRequest is used to marked a job as stable. 
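To make the new client surface concrete, here is a minimal sketch of how a caller might attach the original job source at registration time and later read it back through the Jobs().Submission helper added above in api/jobs.go. It is illustrative only: it assumes a locally reachable Nomad agent, and the job specification text is a throwaway placeholder chosen for the example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

// A placeholder HCL2 job specification used only for this example.
const spec = `
job "example" {
  datacenters = ["dc1"]

  group "web" {
    task "echo" {
      driver = "raw_exec"

      config {
        command = "echo"
        args    = ["hello"]
      }
    }
  }
}
`

func main() {
	// Assumes a Nomad agent is reachable at the default address (or via NOMAD_ADDR).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	jobs := client.Jobs()

	// Ask the server to parse the HCL2 spec into an api.Job.
	job, err := jobs.ParseHCL(spec, true)
	if err != nil {
		log.Fatal(err)
	}

	// Register the job, attaching the original source text as a JobSubmission.
	if _, _, err := jobs.RegisterOpts(job, &api.RegisterOptions{
		Submission: &api.JobSubmission{
			Source: spec,
			Format: "hcl2",
		},
	}, nil); err != nil {
		log.Fatal(err)
	}

	// Later, fetch the stored submission for version 0 of the job.
	sub, _, err := jobs.Submission(*job.ID, 0, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("format=%s\nsource:\n%s\n", sub.Format, sub.Source)
}
```

If no submission was stored for the requested version (the submitter did not send one, or the server dropped it because the source exceeded the configured maximum job source size), the query returns a 404 "job source not found" error rather than an empty submission, as exercised in the tests that follow.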
type JobStabilityRequest struct { // Job to set the stability on diff --git a/api/jobs_test.go b/api/jobs_test.go index 78ac58495..60d158542 100644 --- a/api/jobs_test.go +++ b/api/jobs_test.go @@ -1439,6 +1439,301 @@ func TestJobs_Versions(t *testing.T) { must.Eq(t, *job.ID, *result[0].ID) } +func TestJobs_JobSubmission_Canonicalize(t *testing.T) { + testutil.Parallel(t) + + t.Run("nil", func(t *testing.T) { + var js *JobSubmission + js.Canonicalize() + must.Nil(t, js) + }) + + t.Run("empty variable flags", func(t *testing.T) { + js := &JobSubmission{ + Source: "abc123", + Format: "hcl2", + VariableFlags: make(map[string]string), + } + js.Canonicalize() + must.Nil(t, js.VariableFlags) + }) +} + +func TestJobs_JobSubmission_Copy(t *testing.T) { + testutil.Parallel(t) + + t.Run("nil", func(t *testing.T) { + var js *JobSubmission + c := js.Copy() + must.Nil(t, c) + }) + + t.Run("copy", func(t *testing.T) { + js := &JobSubmission{ + Source: "source", + Format: "format", + VariableFlags: map[string]string{"foo": "bar"}, + Variables: "variables", + } + c := js.Copy() + c.Source = "source2" + c.Format = "format2" + c.VariableFlags["foo"] = "baz" + c.Variables = "variables2" + must.Eq(t, &JobSubmission{ + Source: "source", + Format: "format", + VariableFlags: map[string]string{"foo": "bar"}, + Variables: "variables", + }, js) + }) +} + +func TestJobs_Submission_versions(t *testing.T) { + testutil.Parallel(t) + + c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true }) + t.Cleanup(s.Stop) + + jobs := c.Jobs() + + job := testJob() + jobID := *job.ID // job1 + job.TaskGroups[0].Count = pointerOf(0) // no need to actually run + + // trying to retrieve a version before job is submitted returns a Not Found + _, _, nfErr := jobs.Submission(jobID, 0, nil) + must.ErrorContains(t, nfErr, "job source not found") + + // register our test job at version 0 + job.Meta = map[string]string{"v": "0"} + _, wm, regErr := jobs.RegisterOpts(job, &RegisterOptions{ + Submission: &JobSubmission{ + Source: "the job source v0", + Format: "hcl2", + VariableFlags: map[string]string{"X": "x", "Y": "42", "Z": "true"}, + Variables: "var file content", + }, + }, nil) + must.NoError(t, regErr) + assertWriteMeta(t, wm) + + expectSubmission := func(sub *JobSubmission, format, source, vars string, flags map[string]string) { + must.NotNil(t, sub, must.Sprintf("expected a non-nil job submission for job %s @ version %d", jobID, 0)) + must.Eq(t, format, sub.Format) + must.Eq(t, source, sub.Source) + must.Eq(t, vars, sub.Variables) + must.MapEq(t, flags, sub.VariableFlags) + } + + // we should have a version 0 now + sub, _, err := jobs.Submission(jobID, 0, nil) + must.NoError(t, err) + expectSubmission(sub, "hcl2", "the job source v0", "var file content", map[string]string{"X": "x", "Y": "42", "Z": "true"}) + + // register our test job at version 1 + job.Meta = map[string]string{"v": "1"} + _, wm, regErr = jobs.RegisterOpts(job, &RegisterOptions{ + Submission: &JobSubmission{ + Source: "the job source v1", + Format: "hcl2", + VariableFlags: nil, + Variables: "different var content", + }, + }, nil) + must.NoError(t, regErr) + assertWriteMeta(t, wm) + + // we should have a version 1 now + sub, _, err = jobs.Submission(jobID, 1, nil) + must.NoError(t, err) + expectSubmission(sub, "hcl2", "the job source v1", "different var content", nil) + + // if we query for version 0 we should still have it + sub, _, err = jobs.Submission(jobID, 0, nil) + must.NoError(t, err) + expectSubmission(sub, "hcl2", "the job source v0", 
"var file content", map[string]string{"X": "x", "Y": "42", "Z": "true"}) + + // deregister (and purge) the job + _, _, err = jobs.Deregister(jobID, true, &WriteOptions{Namespace: "default"}) + must.NoError(t, err) + + // now if we query for a submission of v0 it will be gone + sub, _, err = jobs.Submission(jobID, 0, nil) + must.ErrorContains(t, err, "job source not found") + must.Nil(t, sub) + + // same for the v1 submission + sub, _, err = jobs.Submission(jobID, 1, nil) + must.ErrorContains(t, err, "job source not found") + must.Nil(t, sub) +} + +func TestJobs_Submission_namespaces(t *testing.T) { + testutil.Parallel(t) + + c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true }) + t.Cleanup(s.Stop) + + first := &Namespace{ + Name: "first", + Description: "first namespace", + } + + second := &Namespace{ + Name: "second", + Description: "second namespace", + } + + // create two namespaces + namespaces := c.Namespaces() + _, err := namespaces.Register(first, nil) + must.NoError(t, err) + _, err = namespaces.Register(second, nil) + must.NoError(t, err) + + jobs := c.Jobs() + + // use the same jobID to prove we can query submissions of the same ID but + // in different namespaces + commonJobID := "common" + + job := testJob() + job.ID = pointerOf(commonJobID) + job.TaskGroups[0].Count = pointerOf(0) + + // register our test job into first namespace + _, wm, err := jobs.RegisterOpts(job, &RegisterOptions{ + Submission: &JobSubmission{ + Source: "the job source", + Format: "hcl2", + }, + }, &WriteOptions{Namespace: "first"}) + must.NoError(t, err) + assertWriteMeta(t, wm) + + // if we query in the default namespace the submission should not exist + sub, _, err := jobs.Submission(commonJobID, 0, nil) + must.ErrorContains(t, err, "not found") + must.Nil(t, sub) + + // if we query in the first namespace we expect to get the submission + sub, _, err = jobs.Submission(commonJobID, 0, &QueryOptions{Namespace: "first"}) + must.NoError(t, err) + must.Eq(t, "the job source", sub.Source) + + // if we query in the second namespace we expect the submission should not exist + sub, _, err = jobs.Submission(commonJobID, 0, &QueryOptions{Namespace: "second"}) + must.ErrorContains(t, err, "not found") + must.Nil(t, sub) + + // create a second test job for our second namespace + job2 := testJob() + job2.ID = pointerOf(commonJobID) + // keep job name redis to prove we write to correct namespace + job.TaskGroups[0].Count = pointerOf(0) + + // register our second job into the second namespace + _, wm, err = jobs.RegisterOpts(job2, &RegisterOptions{ + Submission: &JobSubmission{ + Source: "second job source", + Format: "hcl1", + }, + }, &WriteOptions{Namespace: "second"}) + must.NoError(t, err) + assertWriteMeta(t, wm) + + // if we query in the default namespace the submission should not exist + sub, _, err = jobs.Submission(commonJobID, 0, nil) + must.ErrorContains(t, err, "not found") + must.Nil(t, sub) + + // if we query in the first namespace we expect to get the first job submission + sub, _, err = jobs.Submission(commonJobID, 0, &QueryOptions{Namespace: "first"}) + must.NoError(t, err) + must.Eq(t, "the job source", sub.Source) + + // if we query in the second namespace we expect the second job submission + sub, _, err = jobs.Submission(commonJobID, 0, &QueryOptions{Namespace: "second"}) + must.NoError(t, err) + must.Eq(t, "second job source", sub.Source) + + // if we query v1 in the first namespace we expect nothing + sub, _, err = jobs.Submission(commonJobID, 1, 
&QueryOptions{Namespace: "first"}) + must.ErrorContains(t, err, "not found") + must.Nil(t, sub) + + // if we query v1 in the second namespace we expect nothing + sub, _, err = jobs.Submission(commonJobID, 1, &QueryOptions{Namespace: "second"}) + must.ErrorContains(t, err, "not found") + must.Nil(t, sub) +} + +func TestJobs_Submission_delete(t *testing.T) { + testutil.Parallel(t) + + c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true }) + t.Cleanup(s.Stop) + + first := &Namespace{ + Name: "first", + Description: "first namespace", + } + + namespaces := c.Namespaces() + _, err := namespaces.Register(first, nil) + must.NoError(t, err) + + jobs := c.Jobs() + job := testJob() + jobID := *job.ID + job.TaskGroups[0].Count = pointerOf(0) + job.Meta = map[string]string{"version": "0"} + + // register our test job into first namespace + _, wm, err := jobs.RegisterOpts(job, &RegisterOptions{ + Submission: &JobSubmission{ + Source: "the job source v0", + Format: "hcl2", + }, + }, &WriteOptions{Namespace: "first"}) + must.NoError(t, err) + assertWriteMeta(t, wm) + + // modify the job and register it again + job.Meta["version"] = "1" + _, wm, err = jobs.RegisterOpts(job, &RegisterOptions{ + Submission: &JobSubmission{ + Source: "the job source v1", + Format: "hcl2", + }, + }, &WriteOptions{Namespace: "first"}) + must.NoError(t, err) + assertWriteMeta(t, wm) + + // ensure we have our submissions for both versions + sub, _, err := jobs.Submission(jobID, 0, &QueryOptions{Namespace: "first"}) + must.NoError(t, err) + must.Eq(t, "the job source v0", sub.Source) + + sub, _, err = jobs.Submission(jobID, 1, &QueryOptions{Namespace: "first"}) + must.NoError(t, err) + must.Eq(t, "the job source v1", sub.Source) + + // deregister (and purge) the job + _, _, err = jobs.Deregister(jobID, true, &WriteOptions{Namespace: "first"}) + must.NoError(t, err) + + // ensure all submissions for the job are gone + sub, _, err = jobs.Submission(jobID, 0, &QueryOptions{Namespace: "first"}) + must.ErrorContains(t, err, "job source not found") + must.Nil(t, sub) + + sub, _, err = jobs.Submission(jobID, 1, &QueryOptions{Namespace: "first"}) + must.ErrorContains(t, err, "job source not found") + must.Nil(t, sub) +} + func TestJobs_PrefixList(t *testing.T) { testutil.Parallel(t) diff --git a/api/util_test.go b/api/util_test.go index c77aacea3..5af2625b2 100644 --- a/api/util_test.go +++ b/api/util_test.go @@ -20,9 +20,7 @@ func assertQueryMeta(t *testing.T, qm *QueryMeta) { func assertWriteMeta(t *testing.T, wm *WriteMeta) { t.Helper() - if wm.LastIndex == 0 { - t.Fatalf("bad index: %d", wm.LastIndex) - } + must.Positive(t, wm.LastIndex, must.Sprint("expected WriteMeta.LastIndex to be > 0")) } func testJob() *Job { diff --git a/client/client_test.go b/client/client_test.go index bdff73011..2c8af3e4e 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -481,7 +481,7 @@ func TestClient_WatchAllocs(t *testing.T) { alloc2.Job = job state := s1.State() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job); err != nil { t.Fatal(err) } if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil { @@ -578,7 +578,7 @@ func TestClient_SaveRestoreState(t *testing.T) { alloc1.ClientStatus = structs.AllocClientStatusRunning state := s1.State() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job); err 
!= nil { t.Fatal(err) } if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil { @@ -684,7 +684,7 @@ func TestClient_AddAllocError(t *testing.T) { alloc1.TaskResources = nil state := s1.State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) require.Nil(err) err = state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)) @@ -1739,7 +1739,7 @@ func TestClient_ReconnectAllocs(t *testing.T) { runningAlloc.ClientStatus = structs.AllocClientStatusPending state := s1.State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) require.NoError(t, err) err = state.UpsertJobSummary(101, mock.JobSummary(runningAlloc.JobID)) diff --git a/client/gc_test.go b/client/gc_test.go index 84e3ed4d8..991478a79 100644 --- a/client/gc_test.go +++ b/client/gc_test.go @@ -397,7 +397,7 @@ func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) { upsertJobFn := func(server *nomad.Server, j *structs.Job) { state := server.State() - require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, nextIndex(), j)) + require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, nextIndex(), nil, j)) require.NoError(state.UpsertJobSummary(nextIndex(), mock.JobSummary(j.ID))) } diff --git a/command/agent/agent.go b/command/agent/agent.go index 316aea06e..9019672bc 100644 --- a/command/agent/agent.go +++ b/command/agent/agent.go @@ -17,6 +17,7 @@ import ( "time" metrics "github.com/armon/go-metrics" + "github.com/dustin/go-humanize" consulapi "github.com/hashicorp/consul/api" log "github.com/hashicorp/go-hclog" uuidparse "github.com/hashicorp/go-uuid" @@ -29,6 +30,7 @@ import ( "github.com/hashicorp/nomad/helper/bufconndialer" "github.com/hashicorp/nomad/helper/escapingfs" "github.com/hashicorp/nomad/helper/pluginutils/loader" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/lib/cpuset" "github.com/hashicorp/nomad/nomad" @@ -163,7 +165,7 @@ func NewAgent(config *Config, logger log.InterceptLogger, logOutput io.Writer, i // convertServerConfig takes an agent config and log output and returns a Nomad // Config. There may be missing fields that must be set by the agent. To do this -// call finalizeServerConfig +// call finalizeServerConfig. 
func convertServerConfig(agentConfig *Config) (*nomad.Config, error) { conf := agentConfig.NomadConfig if conf == nil { @@ -574,6 +576,16 @@ func convertServerConfig(agentConfig *Config) (*nomad.Config, error) { conf.RaftBoltNoFreelistSync = bolt.NoFreelistSync } + // Interpret job_max_source_size as bytes from string value + if agentConfig.Server.JobMaxSourceSize == nil { + agentConfig.Server.JobMaxSourceSize = pointer.Of("1M") + } + jobMaxSourceBytes, err := humanize.ParseBytes(*agentConfig.Server.JobMaxSourceSize) + if err != nil { + return nil, fmt.Errorf("failed to parse max job source bytes: %w", err) + } + conf.JobMaxSourceSize = int(jobMaxSourceBytes) + return conf, nil } @@ -606,7 +618,7 @@ func (a *Agent) clientConfig() (*clientconfig.Config, error) { return nil, err } - if err := a.finalizeClientConfig(c); err != nil { + if err = a.finalizeClientConfig(c); err != nil { return nil, err } diff --git a/command/agent/agent_test.go b/command/agent/agent_test.go index bcdbf2410..e7a38ff15 100644 --- a/command/agent/agent_test.go +++ b/command/agent/agent_test.go @@ -678,7 +678,7 @@ func TestAgent_ServerConfig_RaftProtocol_3(t *testing.T) { } } -func TestAgent_ClientConfig(t *testing.T) { +func TestAgent_ClientConfig_discovery(t *testing.T) { ci.Parallel(t) conf := DefaultConfig() conf.Client.Enabled = true @@ -730,6 +730,21 @@ func TestAgent_ClientConfig(t *testing.T) { require.False(t, c.NomadServiceDiscovery) } +func TestAgent_ClientConfig_JobMaxSourceSize(t *testing.T) { + ci.Parallel(t) + + conf := DevConfig(nil) + must.Eq(t, conf.Server.JobMaxSourceSize, pointer.Of("1M")) + must.NoError(t, conf.normalizeAddrs()) + + // config conversion ensures value is set + conf.Server.JobMaxSourceSize = nil + agent := &Agent{config: conf} + serverConf, err := agent.serverConfig() + must.NoError(t, err) + must.Eq(t, 1e6, serverConf.JobMaxSourceSize) +} + func TestAgent_ClientConfig_ReservedCores(t *testing.T) { ci.Parallel(t) conf := DefaultConfig() @@ -738,30 +753,28 @@ func TestAgent_ClientConfig_ReservedCores(t *testing.T) { conf.Client.Reserved.Cores = "0,2-3" a := &Agent{config: conf} c, err := a.clientConfig() - require.NoError(t, err) - require.Exactly(t, []uint16{0, 1, 2, 3, 4, 5, 6, 7}, c.ReservableCores) - require.Exactly(t, []uint16{0, 2, 3}, c.Node.ReservedResources.Cpu.ReservedCpuCores) + must.NoError(t, err) + must.Eq(t, []uint16{0, 1, 2, 3, 4, 5, 6, 7}, c.ReservableCores) + must.Eq(t, []uint16{0, 2, 3}, c.Node.ReservedResources.Cpu.ReservedCpuCores) } // Clients should inherit telemetry configuration func TestAgent_Client_TelemetryConfiguration(t *testing.T) { ci.Parallel(t) - assert := assert.New(t) - conf := DefaultConfig() conf.DevMode = true a := &Agent{config: conf} c, err := a.clientConfig() - assert.Nil(err) + must.NoError(t, err) telemetry := conf.Telemetry - assert.Equal(c.StatsCollectionInterval, telemetry.collectionInterval) - assert.Equal(c.PublishNodeMetrics, telemetry.PublishNodeMetrics) - assert.Equal(c.PublishAllocationMetrics, telemetry.PublishAllocationMetrics) + must.Eq(t, c.StatsCollectionInterval, telemetry.collectionInterval) + must.Eq(t, c.PublishNodeMetrics, telemetry.PublishNodeMetrics) + must.Eq(t, c.PublishAllocationMetrics, telemetry.PublishAllocationMetrics) } // TestAgent_HTTPCheck asserts Agent.agentHTTPCheck properly alters the HTTP diff --git a/command/agent/config.go b/command/agent/config.go index a67af86d4..b97eab841 100644 --- a/command/agent/config.go +++ b/command/agent/config.go @@ -609,6 +609,11 @@ type ServerConfig struct { // for the 
EventBufferSize is 1. EventBufferSize *int `hcl:"event_buffer_size"` + // JobMaxSourceSize limits the maximum size of a jobs source hcl/json + // before being discarded automatically. If unset, the maximum size defaults + // to 1 MB. If the value is zero, no job sources will be stored. + JobMaxSourceSize *string `hcl:"max_job_source_size"` + // LicensePath is the path to search for an enterprise license. LicensePath string `hcl:"license_path"` @@ -675,6 +680,7 @@ func (s *ServerConfig) Copy() *ServerConfig { ns.PlanRejectionTracker = s.PlanRejectionTracker.Copy() ns.EnableEventBroker = pointer.Copy(s.EnableEventBroker) ns.EventBufferSize = pointer.Copy(s.EventBufferSize) + ns.JobMaxSourceSize = pointer.Copy(s.JobMaxSourceSize) ns.licenseAdditionalPublicKeys = slices.Clone(s.licenseAdditionalPublicKeys) ns.ExtraKeysHCL = slices.Clone(s.ExtraKeysHCL) ns.Search = s.Search.Copy() @@ -1060,7 +1066,7 @@ func (a *Addresses) Copy() *Addresses { return &na } -// AdvertiseAddrs is used to control the addresses we advertise out for +// NormalizedAddrs is used to control the addresses we advertise out for // different network services. All are optional and default to BindAddr and // their default Port. type NormalizedAddrs struct { @@ -1309,6 +1315,7 @@ func DefaultConfig() *Config { LimitResults: 100, MinTermLength: 2, }, + JobMaxSourceSize: pointer.Of("1M"), }, ACL: &ACLConfig{ Enabled: false, @@ -1978,6 +1985,8 @@ func (s *ServerConfig) Merge(b *ServerConfig) *ServerConfig { result.EventBufferSize = b.EventBufferSize } + result.JobMaxSourceSize = pointer.Merge(s.JobMaxSourceSize, b.JobMaxSourceSize) + if b.PlanRejectionTracker != nil { result.PlanRejectionTracker = result.PlanRejectionTracker.Merge(b.PlanRejectionTracker) } diff --git a/command/agent/deployment_endpoint_test.go b/command/agent/deployment_endpoint_test.go index a1eca6c12..87258f9a8 100644 --- a/command/agent/deployment_endpoint_test.go +++ b/command/agent/deployment_endpoint_test.go @@ -111,7 +111,7 @@ func TestHTTP_DeploymentAllocations(t *testing.T) { a2.TaskStates = make(map[string]*structs.TaskState) a2.TaskStates["test"] = taskState2 - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(999, d), "UpsertDeployment") assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a1, a2}), "UpsertAllocs") @@ -178,7 +178,7 @@ func TestHTTP_DeploymentPause(t *testing.T) { j := mock.Job() d := mock.Deployment() d.JobID = j.ID - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Create the pause request @@ -219,7 +219,7 @@ func TestHTTP_DeploymentPromote(t *testing.T) { j := mock.Job() d := mock.Deployment() d.JobID = j.ID - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Create the pause request @@ -263,7 +263,7 @@ func TestHTTP_DeploymentAllocHealth(t *testing.T) { a := mock.Alloc() a.JobID = j.ID a.DeploymentID = d.ID - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(999, d), 
"UpsertDeployment") assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a}), "UpsertAllocs") @@ -305,7 +305,7 @@ func TestHTTP_DeploymentFail(t *testing.T) { j := mock.Job() d := mock.Deployment() d.JobID = j.ID - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(999, d), "UpsertDeployment") // Make the HTTP request diff --git a/command/agent/fs_endpoint_test.go b/command/agent/fs_endpoint_test.go index c45b8ed87..8da991faf 100644 --- a/command/agent/fs_endpoint_test.go +++ b/command/agent/fs_endpoint_test.go @@ -66,7 +66,7 @@ func addAllocToClient(agent *TestAgent, alloc *structs.Allocation, wait clientAl // Upsert the allocation state := agent.server.State() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc.Job)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc})) if wait == noWaitClientAlloc { diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go index 6e3766f48..098f3c761 100644 --- a/command/agent/job_endpoint.go +++ b/command/agent/job_endpoint.go @@ -107,6 +107,9 @@ func (s *HTTPServer) JobSpecificRequest(resp http.ResponseWriter, req *http.Requ case strings.HasSuffix(path, "/services"): jobID := strings.TrimSuffix(path, "/services") return s.jobServiceRegistrations(resp, req, jobID) + case strings.HasSuffix(path, "/submission"): + jobID := strings.TrimSuffix(path, "/submission") + return s.jobSubmissionCRUD(resp, req, jobID) default: return s.jobCRUD(resp, req, path) } @@ -330,6 +333,42 @@ func (s *HTTPServer) jobLatestDeployment(resp http.ResponseWriter, req *http.Req return out.Deployment, nil } +func (s *HTTPServer) jobSubmissionCRUD(resp http.ResponseWriter, req *http.Request, jobID string) (*structs.JobSubmission, error) { + version, err := strconv.ParseUint(req.URL.Query().Get("version"), 10, 64) + if err != nil { + return nil, CodedError(400, "Unable to parse job submission version parameter") + } + switch req.Method { + case "GET": + return s.jobSubmissionQuery(resp, req, jobID, version) + default: + return nil, CodedError(405, ErrInvalidMethod) + } +} + +func (s *HTTPServer) jobSubmissionQuery(resp http.ResponseWriter, req *http.Request, jobID string, version uint64) (*structs.JobSubmission, error) { + args := structs.JobSubmissionRequest{ + JobID: jobID, + Version: version, + } + + if s.parse(resp, req, &args.Region, &args.QueryOptions) { + return nil, nil + } + + var out structs.JobSubmissionResponse + if err := s.agent.RPC("Job.GetJobSubmission", &args, &out); err != nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + if out.Submission == nil { + return nil, CodedError(404, "job source not found") + } + + return out.Submission, nil +} + func (s *HTTPServer) jobCRUD(resp http.ResponseWriter, req *http.Request, jobID string) (interface{}, error) { switch req.Method { case "GET": @@ -413,8 +452,12 @@ func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request, jobI } sJob, writeReq := s.apiJobAndRequestToStructs(args.Job, req, args.WriteRequest) + submission := apiJobSubmissionToStructs(args.Submission) + regReq := structs.JobRegisterRequest{ - Job: sJob, + Job: sJob, + Submission: submission, + EnforceIndex: args.EnforceIndex, JobModifyIndex: args.JobModifyIndex, PolicyOverride: args.PolicyOverride, @@ -743,10 +786,14 @@ 
func (s *HTTPServer) JobsParseRequest(resp http.ResponseWriter, req *http.Reques jobStruct, err = jobspec.Parse(strings.NewReader(args.JobHCL)) } else { jobStruct, err = jobspec2.ParseWithConfig(&jobspec2.ParseConfig{ - Path: "input.hcl", - Body: []byte(args.JobHCL), - AllowFS: false, + Path: "input.hcl", + Body: []byte(args.JobHCL), + AllowFS: false, + VarContent: args.Variables, }) + if err != nil { + return nil, CodedError(400, fmt.Sprintf("Failed to parse job: %v", err)) + } } if err != nil { return nil, CodedError(400, err.Error()) @@ -790,6 +837,18 @@ func (s *HTTPServer) jobServiceRegistrations(resp http.ResponseWriter, req *http return reply.Services, nil } +func apiJobSubmissionToStructs(submission *api.JobSubmission) *structs.JobSubmission { + if submission == nil { + return nil + } + return &structs.JobSubmission{ + Source: submission.Source, + Format: submission.Format, + VariableFlags: submission.VariableFlags, + Variables: submission.Variables, + } +} + // apiJobAndRequestToStructs parses the query params from the incoming // request and converts to a structs.Job and WriteRequest with the func (s *HTTPServer) apiJobAndRequestToStructs(job *api.Job, req *http.Request, apiReq api.WriteRequest) (*structs.Job, *structs.WriteRequest) { diff --git a/command/agent/job_endpoint_test.go b/command/agent/job_endpoint_test.go index fb355af01..6109af2ba 100644 --- a/command/agent/job_endpoint_test.go +++ b/command/agent/job_endpoint_test.go @@ -387,31 +387,45 @@ func TestHTTP_JobsParse(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { buf := encodeReq(api.JobsParseRequest{JobHCL: mock.HCL()}) req, err := http.NewRequest("POST", "/v1/jobs/parse", buf) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) respW := httptest.NewRecorder() obj, err := s.Server.JobsParseRequest(respW, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj == nil { - t.Fatal("response should not be nil") - } + must.NoError(t, err) + must.NotNil(t, obj) job := obj.(*api.Job) expected := mock.Job() - if job.Name == nil || *job.Name != expected.Name { - t.Fatalf("job name is '%s', expected '%s'", *job.Name, expected.Name) - } + must.Eq(t, expected.Name, *job.Name) + must.Eq(t, expected.Datacenters[0], job.Datacenters[0]) + }) +} - if job.Datacenters == nil || - job.Datacenters[0] != expected.Datacenters[0] { - t.Fatalf("job datacenters is '%s', expected '%s'", - job.Datacenters[0], expected.Datacenters[0]) - } +func TestHTTP_JobsParse_HCLVar(t *testing.T) { + ci.Parallel(t) + httpTest(t, nil, func(s *TestAgent) { + hclJob, hclVar := mock.HCLVar() + buf := encodeReq(api.JobsParseRequest{ + JobHCL: hclJob, + Variables: hclVar, + }) + req, err := http.NewRequest("POST", "/v1/jobs/parse", buf) + must.NoError(t, err) + + respW := httptest.NewRecorder() + + obj, err := s.Server.JobsParseRequest(respW, req) + must.NoError(t, err) + must.NotNil(t, obj) + + job := obj.(*api.Job) + + must.Eq(t, "var-job", *job.Name) + must.Eq(t, map[string]any{ + "command": "echo", + "args": []any{"S is stringy, N is 42, B is true"}, + }, job.TaskGroups[0].Tasks[0].Config) }) } @@ -598,7 +612,7 @@ func TestHTTP_JobQuery_Payload(t *testing.T) { // Directly manipulate the state state := s.Agent.server.State() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { t.Fatalf("Failed to upsert job: %v", err) } @@ -1608,6 +1622,49 @@ func TestHTTP_JobVersions(t *testing.T) { }) } +func TestHTTP_JobSubmission(t 
*testing.T) { + ci.Parallel(t) + + httpTest(t, nil, func(s *TestAgent) { + job := mock.Job() + args := structs.JobRegisterRequest{ + Job: job, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: structs.DefaultNamespace, + }, + Submission: &structs.JobSubmission{ + Source: mock.HCL(), + Format: "hcl2", + }, + } + var resp structs.JobRegisterResponse + must.NoError(t, s.Agent.RPC("Job.Register", &args, &resp)) + + respW := httptest.NewRecorder() + + // make request for job submission @ v0 + req, err := http.NewRequest(http.MethodGet, "/v1/job/"+job.ID+"/submission?version=0", nil) + must.NoError(t, err) + submission, err := s.Server.jobSubmissionCRUD(respW, req, job.ID) + must.NoError(t, err) + must.Eq(t, "hcl2", submission.Format) + must.StrContains(t, submission.Source, `job "my-job" {`) + + // make request for job submission @v1 (does not exist) + req, err = http.NewRequest(http.MethodGet, "/v1/job/"+job.ID+"/submission?version=1", nil) + must.NoError(t, err) + _, err = s.Server.jobSubmissionCRUD(respW, req, job.ID) + must.ErrorContains(t, err, "job source not found") + + // make POST request (invalid method) + req, err = http.NewRequest(http.MethodPost, "/v1/job/"+job.ID+"/submission?version=0", nil) + must.NoError(t, err) + _, err = s.Server.jobSubmissionCRUD(respW, req, job.ID) + must.ErrorContains(t, err, "Invalid method") + }) +} + func TestHTTP_PeriodicForce(t *testing.T) { ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { @@ -2267,7 +2324,7 @@ func TestHTTPServer_jobServiceRegistrations(t *testing.T) { // Generate a job and upsert this. job := mock.Job() - require.NoError(t, testState.UpsertJob(structs.MsgTypeTestSetup, 10, job)) + require.NoError(t, testState.UpsertJob(structs.MsgTypeTestSetup, 10, nil, job)) // Generate a service registration, assigned the jobID to the // mocked jobID, and upsert this. @@ -2301,7 +2358,7 @@ func TestHTTPServer_jobServiceRegistrations(t *testing.T) { // Generate a job and upsert this. job := mock.Job() - require.NoError(t, testState.UpsertJob(structs.MsgTypeTestSetup, 10, job)) + require.NoError(t, testState.UpsertJob(structs.MsgTypeTestSetup, 10, nil, job)) // Build the HTTP request. 
path := fmt.Sprintf("/v1/job/%s/services", job.ID) @@ -3635,6 +3692,30 @@ func TestConversion_apiResourcesToStructs(t *testing.T) { } } +func TestConversion_apiJobSubmissionToStructs(t *testing.T) { + ci.Parallel(t) + + t.Run("nil", func(t *testing.T) { + result := apiJobSubmissionToStructs(nil) + must.Nil(t, result) + }) + + t.Run("not nil", func(t *testing.T) { + result := apiJobSubmissionToStructs(&api.JobSubmission{ + Source: "source", + Format: "hcl2", + VariableFlags: map[string]string{"foo": "bar"}, + Variables: "variable", + }) + must.Eq(t, &structs.JobSubmission{ + Source: "source", + Format: "hcl2", + VariableFlags: map[string]string{"foo": "bar"}, + Variables: "variable", + }, result) + }) +} + func TestConversion_apiConnectSidecarTaskToStructs(t *testing.T) { ci.Parallel(t) require.Nil(t, apiConnectSidecarTaskToStructs(nil)) diff --git a/command/agent/search_endpoint_test.go b/command/agent/search_endpoint_test.go index 4ee6b8cff..63cb103e2 100644 --- a/command/agent/search_endpoint_test.go +++ b/command/agent/search_endpoint_test.go @@ -25,7 +25,7 @@ func createJobForTest(jobID string, s *TestAgent, t *testing.T) { job.ID = jobID job.TaskGroups[0].Count = 1 state := s.Agent.server.State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.NoError(t, err) } @@ -61,7 +61,7 @@ func createCmdJobForTest(name, cmd string, s *TestAgent, t *testing.T) *structs. job.TaskGroups[0].Tasks[0].Config["command"] = cmd job.TaskGroups[0].Count = 1 state := s.Agent.server.State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.NoError(t, err) return job } diff --git a/command/helpers.go b/command/helpers.go index 97cb918f0..34d037280 100644 --- a/command/helpers.go +++ b/command/helpers.go @@ -23,10 +23,15 @@ import ( "github.com/kr/text" "github.com/mitchellh/cli" "github.com/posener/complete" - "github.com/ryanuber/columnize" ) +const ( + formatJSON = "json" + formatHCL1 = "hcl1" + formatHCL2 = "hcl2" +) + // maxLineLength is the maximum width of any line. const maxLineLength int = 78 @@ -425,19 +430,11 @@ func (j *JobGetter) Validate() error { } // ApiJob returns the Job struct from jobfile. 
-func (j *JobGetter) ApiJob(jpath string) (*api.Job, error) { - return j.ApiJobWithArgs(jpath, nil, nil, true) -} - -func (j *JobGetter) ApiJobWithArgs(jpath string, vars []string, varfiles []string, strict bool) (*api.Job, error) { - j.Vars = vars - j.VarFiles = varfiles - j.Strict = strict - +func (j *JobGetter) ApiJob(jpath string) (*api.JobSubmission, *api.Job, error) { return j.Get(jpath) } -func (j *JobGetter) Get(jpath string) (*api.Job, error) { +func (j *JobGetter) Get(jpath string) (*api.JobSubmission, *api.Job, error) { var jobfile io.Reader pathName := filepath.Base(jpath) switch jpath { @@ -450,23 +447,23 @@ func (j *JobGetter) Get(jpath string) (*api.Job, error) { pathName = "stdin" default: if len(jpath) == 0 { - return nil, fmt.Errorf("Error jobfile path has to be specified.") + return nil, nil, fmt.Errorf("Error jobfile path has to be specified.") } jobFile, err := os.CreateTemp("", "jobfile") if err != nil { - return nil, err + return nil, nil, err } defer os.Remove(jobFile.Name()) if err := jobFile.Close(); err != nil { - return nil, err + return nil, nil, err } // Get the pwd pwd, err := os.Getwd() if err != nil { - return nil, err + return nil, nil, err } client := &gg.Client{ @@ -479,11 +476,11 @@ func (j *JobGetter) Get(jpath string) (*api.Job, error) { } if err := client.Get(); err != nil { - return nil, fmt.Errorf("Error getting jobfile from %q: %v", jpath, err) + return nil, nil, fmt.Errorf("Error getting jobfile from %q: %v", jpath, err) } else { file, err := os.Open(jobFile.Name()) if err != nil { - return nil, fmt.Errorf("Error opening file %q: %v", jpath, err) + return nil, nil, fmt.Errorf("Error opening file %q: %v", jpath, err) } defer file.Close() jobfile = file @@ -491,12 +488,22 @@ func (j *JobGetter) Get(jpath string) (*api.Job, error) { } // Parse the JobFile - var jobStruct *api.Job + var jobStruct *api.Job // deserialized destination + var source bytes.Buffer // tee the original + var jobSubmission *api.JobSubmission // store the original and format + jobfile = io.TeeReader(jobfile, &source) var err error switch { case j.HCL1: jobStruct, err = jobspec.Parse(jobfile) + + // include the hcl1 source as the submission + jobSubmission = &api.JobSubmission{ + Source: source.String(), + Format: formatHCL1, + } case j.JSON: + // Support JSON files with both a top-level Job key as well as // ones without. 
eitherJob := struct { @@ -505,7 +512,7 @@ func (j *JobGetter) Get(jpath string) (*api.Job, error) { }{} if err := json.NewDecoder(jobfile).Decode(&eitherJob); err != nil { - return nil, fmt.Errorf("Failed to parse JSON job: %w", err) + return nil, nil, fmt.Errorf("Failed to parse JSON job: %w", err) } if eitherJob.NestedJob != nil { @@ -513,15 +520,21 @@ func (j *JobGetter) Get(jpath string) (*api.Job, error) { } else { jobStruct = &eitherJob.Job } - default: - var buf bytes.Buffer - _, err = io.Copy(&buf, jobfile) - if err != nil { - return nil, fmt.Errorf("Error reading job file from %s: %v", jpath, err) + + // include the json source as the submission + jobSubmission = &api.JobSubmission{ + Source: source.String(), + Format: formatJSON, } + default: + if _, err = io.Copy(&source, jobfile); err != nil { + return nil, nil, fmt.Errorf("Failed to parse HCL job: %w", err) + } + + // we are parsing HCL2, whether from a file or stdio jobStruct, err = jobspec2.ParseWithConfig(&jobspec2.ParseConfig{ Path: pathName, - Body: buf.Bytes(), + Body: source.Bytes(), ArgVars: j.Vars, AllowFS: true, VarFiles: j.VarFiles, @@ -529,18 +542,39 @@ func (j *JobGetter) Get(jpath string) (*api.Job, error) { Strict: j.Strict, }) + // submit the job with the submission with content from -var flags + jobSubmission = &api.JobSubmission{ + VariableFlags: extractVarFlags(j.Vars), + Source: source.String(), + Format: formatHCL2, + } if err != nil { - if _, merr := jobspec.Parse(&buf); merr == nil { - return nil, fmt.Errorf("Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:\n%v", err) + if _, merr := jobspec.Parse(&source); merr == nil { + return nil, nil, fmt.Errorf("Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:\n%v", err) } } } if err != nil { - return nil, fmt.Errorf("Error parsing job file from %s:\n%v", jpath, err) + return nil, nil, fmt.Errorf("Error parsing job file from %s:\n%v", jpath, err) } - return jobStruct, nil + return jobSubmission, jobStruct, nil +} + +// extractVarFlags is used to parse the values of -var command line arguments +// and turn them into a map to be used for submission. The result is never +// nil for convenience. +func extractVarFlags(slice []string) map[string]string { + m := make(map[string]string, len(slice)) + for _, s := range slice { + if tokens := strings.SplitN(s, "=", 2); len(tokens) == 1 { + m[tokens[0]] = "" + } else { + m[tokens[0]] = tokens[1] + } + } + return m } // mergeAutocompleteFlags is used to join multiple flag completion sets. diff --git a/command/helpers_test.go b/command/helpers_test.go index 6547a2d0e..a9e6cd69d 100644 --- a/command/helpers_test.go +++ b/command/helpers_test.go @@ -20,6 +20,7 @@ import ( "github.com/hashicorp/nomad/helper/pointer" "github.com/kr/pretty" "github.com/mitchellh/cli" + "github.com/shoenig/test/must" "github.com/stretchr/testify/require" ) @@ -271,7 +272,7 @@ func TestJobGetter_LocalFile(t *testing.T) { } j := &JobGetter{} - aj, err := j.ApiJob(fh.Name()) + _, aj, err := j.ApiJob(fh.Name()) if err != nil { t.Fatalf("err: %s", err) } @@ -318,7 +319,7 @@ func TestJobGetter_LocalFile_InvalidHCL2(t *testing.T) { require.NoError(t, err) j := &JobGetter{} - _, err = j.ApiJob(fh.Name()) + _, _, err = j.ApiJob(fh.Name()) require.Error(t, err) exptMessage := "Failed to parse using HCL 2. 
Use the HCL 1" @@ -369,7 +370,13 @@ job "example" { _, err = vf.WriteString(fileVars + "\n") require.NoError(t, err) - j, err := (&JobGetter{}).ApiJobWithArgs(hclf.Name(), cliArgs, []string{vf.Name()}, true) + jg := &JobGetter{ + Vars: cliArgs, + VarFiles: []string{vf.Name()}, + Strict: true, + } + + _, j, err := jg.Get(hclf.Name()) require.NoError(t, err) require.NotNil(t, j) @@ -418,7 +425,13 @@ unsedVar2 = "from-varfile" _, err = vf.WriteString(fileVars + "\n") require.NoError(t, err) - j, err := (&JobGetter{}).ApiJobWithArgs(hclf.Name(), cliArgs, []string{vf.Name()}, false) + jg := &JobGetter{ + Vars: cliArgs, + VarFiles: []string{vf.Name()}, + Strict: false, + } + + _, j, err := jg.Get(hclf.Name()) require.NoError(t, err) require.NotNil(t, j) @@ -437,7 +450,7 @@ func TestJobGetter_HTTPServer(t *testing.T) { time.Sleep(100 * time.Millisecond) j := &JobGetter{} - aj, err := j.ApiJob("http://127.0.0.1:12345/") + _, aj, err := j.ApiJob("http://127.0.0.1:12345/") if err != nil { t.Fatalf("err: %s", err) } @@ -615,3 +628,21 @@ func TestUiErrorWriter(t *testing.T) { expectedErr += "and thensome more\n" require.Equal(t, expectedErr, errBuf.String()) } + +func Test_extractVarFlags(t *testing.T) { + ci.Parallel(t) + + t.Run("nil", func(t *testing.T) { + result := extractVarFlags(nil) + must.MapEmpty(t, result) + }) + + t.Run("complete", func(t *testing.T) { + result := extractVarFlags([]string{"one=1", "two=2", "three"}) + must.Eq(t, map[string]string{ + "one": "1", + "two": "2", + "three": "", + }, result) + }) +} diff --git a/command/job_allocs_test.go b/command/job_allocs_test.go index 57c637393..04ca144fb 100644 --- a/command/job_allocs_test.go +++ b/command/job_allocs_test.go @@ -66,7 +66,7 @@ func TestJobAllocsCommand_Run(t *testing.T) { // Create a job without an allocation job := mock.Job() state := srv.Agent.Server().State() - require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 100, job)) + require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job)) // Should display no match if the job doesn't have allocations code := cmd.Run([]string{"-address=" + url, job.ID}) @@ -109,7 +109,7 @@ func TestJobAllocsCommand_Template(t *testing.T) { // Create a job job := mock.Job() state := srv.Agent.Server().State() - require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 100, job)) + require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job)) // Inject a running allocation a := mock.Alloc() @@ -168,7 +168,7 @@ func TestJobAllocsCommand_AutocompleteArgs(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() j := mock.Job() - require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, j)) + require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) prefix := j.ID[:len(j.ID)-5] args := complete.Args{Last: prefix} @@ -191,7 +191,7 @@ func TestJobAllocsCommand_ACL(t *testing.T) { // Create a job with an alloc. 
job := mock.Job() state := srv.Agent.Server().State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) must.NoError(t, err) a := mock.Alloc() diff --git a/command/job_deployments_test.go b/command/job_deployments_test.go index f8952d5c5..834da2352 100644 --- a/command/job_deployments_test.go +++ b/command/job_deployments_test.go @@ -64,7 +64,7 @@ func TestJobDeploymentsCommand_Run(t *testing.T) { // Create a job without a deployment job := mock.Job() state := srv.Agent.Server().State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 100, job)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job)) // Should display no match if the job doesn't have deployments if code := cmd.Run([]string{"-address=" + url, job.ID}); code != 0 { @@ -108,7 +108,7 @@ func TestJobDeploymentsCommand_Run_Latest(t *testing.T) { // Create a job without a deployment job := mock.Job() state := srv.Agent.Server().State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 100, job)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job)) // Should display no match if the job doesn't have deployments if code := cmd.Run([]string{"-address=" + url, "-latest", job.ID}); code != 0 { @@ -148,7 +148,7 @@ func TestJobDeploymentsCommand_AutocompleteArgs(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() j := mock.Job() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, j)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) prefix := j.ID[:len(j.ID)-5] args := complete.Args{Last: prefix} @@ -171,7 +171,7 @@ func TestJobDeploymentsCommand_ACL(t *testing.T) { // Create a job with a deployment. job := mock.Job() state := srv.Agent.Server().State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) must.NoError(t, err) d := mock.Deployment() diff --git a/command/job_dispatch_test.go b/command/job_dispatch_test.go index c1c6ab1bb..0ce09c614 100644 --- a/command/job_dispatch_test.go +++ b/command/job_dispatch_test.go @@ -67,7 +67,7 @@ func TestJobDispatchCommand_AutocompleteArgs(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() j := mock.Job() - require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, j)) + require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) prefix := j.ID[:len(j.ID)-5] args := complete.Args{Last: prefix} @@ -80,7 +80,7 @@ func TestJobDispatchCommand_AutocompleteArgs(t *testing.T) { // Create a fake parameterized job j1 := mock.Job() j1.ParameterizedJob = &structs.ParameterizedJobConfig{} - require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 2000, j1)) + require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 2000, nil, j1)) prefix = j1.ID[:len(j1.ID)-5] args = complete.Args{Last: prefix} @@ -106,7 +106,7 @@ func TestJobDispatchCommand_ACL(t *testing.T) { job.Type = "batch" job.ParameterizedJob = &structs.ParameterizedJobConfig{} state := srv.Agent.Server().State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) must.NoError(t, err) testCases := []struct { diff --git a/command/job_eval_test.go b/command/job_eval_test.go index 8b3a1f588..676cf686d 100644 --- a/command/job_eval_test.go +++ b/command/job_eval_test.go @@ -80,7 +80,7 @@ func TestJobEvalCommand_Run(t *testing.T) { // Create a job job := mock.Job() - err := 
state.UpsertJob(structs.MsgTypeTestSetup, 11, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 11, nil, job) require.Nil(err) job, err = state.JobByID(nil, structs.DefaultNamespace, job.ID) @@ -121,7 +121,7 @@ func TestJobEvalCommand_AutocompleteArgs(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() j := mock.Job() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, j)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) prefix := j.ID[:len(j.ID)-5] args := complete.Args{Last: prefix} @@ -144,7 +144,7 @@ func TestJobEvalCommand_ACL(t *testing.T) { // Create a job. job := mock.MinJob() state := srv.Agent.Server().State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) must.NoError(t, err) testCases := []struct { diff --git a/command/job_history_test.go b/command/job_history_test.go index c0f5a63d1..e10c619ea 100644 --- a/command/job_history_test.go +++ b/command/job_history_test.go @@ -59,7 +59,7 @@ func TestJobHistoryCommand_AutocompleteArgs(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() j := mock.Job() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, j)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) prefix := j.ID[:len(j.ID)-5] args := complete.Args{Last: prefix} @@ -82,7 +82,7 @@ func TestJobHistoryCommand_ACL(t *testing.T) { // Create a job. job := mock.MinJob() state := srv.Agent.Server().State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) must.NoError(t, err) testCases := []struct { diff --git a/command/job_inspect_test.go b/command/job_inspect_test.go index 067eda9c9..81ad5e279 100644 --- a/command/job_inspect_test.go +++ b/command/job_inspect_test.go @@ -79,7 +79,7 @@ func TestInspectCommand_AutocompleteArgs(t *testing.T) { state := srv.Agent.Server().State() j := mock.Job() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, j)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) prefix := j.ID[:len(j.ID)-5] args := complete.Args{Last: prefix} @@ -102,7 +102,7 @@ func TestJobInspectCommand_ACL(t *testing.T) { // Create a job job := mock.MinJob() state := srv.Agent.Server().State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) must.NoError(t, err) testCases := []struct { diff --git a/command/job_periodic_force_test.go b/command/job_periodic_force_test.go index 6cefba335..3f28d4bd9 100644 --- a/command/job_periodic_force_test.go +++ b/command/job_periodic_force_test.go @@ -55,7 +55,7 @@ func TestJobPeriodicForceCommand_AutocompleteArgs(t *testing.T) { // Create a fake job, not periodic state := srv.Agent.Server().State() j := mock.Job() - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, j)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) predictor := cmd.AutocompleteArgs() @@ -72,7 +72,7 @@ func TestJobPeriodicForceCommand_AutocompleteArgs(t *testing.T) { ProhibitOverlap: true, TimeZone: "test zone", } - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, j2)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j2)) res = predictor.Predict(complete.Args{Last: j2.ID[:len(j.ID)-5]}) require.Equal(t, []string{j2.ID}, res) diff --git a/command/job_plan.go b/command/job_plan.go index 10e8af7fa..d3d45fbd0 100644 --- 
a/command/job_plan.go +++ b/command/job_plan.go @@ -197,7 +197,7 @@ func (c *JobPlanCommand) Run(args []string) int { path := args[0] // Get Job struct from Jobfile - job, err := c.JobGetter.Get(path) + _, job, err := c.JobGetter.Get(path) if err != nil { c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err)) return 255 diff --git a/command/job_promote_test.go b/command/job_promote_test.go index 68c7c488f..b884ded8d 100644 --- a/command/job_promote_test.go +++ b/command/job_promote_test.go @@ -60,7 +60,7 @@ func TestJobPromoteCommand_AutocompleteArgs(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() j := mock.Job() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, j)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) prefix := j.ID[:len(j.ID)-5] args := complete.Args{Last: prefix} @@ -83,7 +83,7 @@ func TestJobPromoteCommand_ACL(t *testing.T) { // Create a job. job := mock.MinJob() state := srv.Agent.Server().State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) must.NoError(t, err) testCases := []struct { diff --git a/command/job_revert_test.go b/command/job_revert_test.go index 850a8c1cf..02b1a55c8 100644 --- a/command/job_revert_test.go +++ b/command/job_revert_test.go @@ -59,7 +59,7 @@ func TestJobRevertCommand_AutocompleteArgs(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() j := mock.Job() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, j)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) prefix := j.ID[:len(j.ID)-5] args := complete.Args{Last: prefix} @@ -158,7 +158,7 @@ namespace "default" { // Create a job. job := mock.MinJob() state := srv.Agent.Server().State() - err := state.UpsertJob(structs.MsgTypeTestSetup, uint64(300+i), job) + err := state.UpsertJob(structs.MsgTypeTestSetup, uint64(300+i), nil, job) must.NoError(t, err) defer func() { client.Jobs().Deregister(job.ID, true, &api.WriteOptions{ @@ -172,7 +172,7 @@ namespace "default" { "test": tc.name, } newJob.Version = uint64(i) - err = state.UpsertJob(structs.MsgTypeTestSetup, uint64(301+i), newJob) + err = state.UpsertJob(structs.MsgTypeTestSetup, uint64(301+i), nil, newJob) must.NoError(t, err) if tc.aclPolicy != "" { diff --git a/command/job_run.go b/command/job_run.go index d4dc701ac..13c8c705c 100644 --- a/command/job_run.go +++ b/command/job_run.go @@ -236,7 +236,7 @@ func (c *JobRunCommand) Run(args []string) int { } // Get Job struct from Jobfile - job, err := c.JobGetter.Get(args[0]) + sub, job, err := c.JobGetter.Get(args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err)) return 1 @@ -320,6 +320,7 @@ func (c *JobRunCommand) Run(args []string) int { PolicyOverride: override, PreserveCounts: preserveCounts, EvalPriority: evalPriority, + Submission: sub, } if enforce { opts.EnforceIndex = true diff --git a/command/job_scale_test.go b/command/job_scale_test.go index fa947db60..2e10e9541 100644 --- a/command/job_scale_test.go +++ b/command/job_scale_test.go @@ -244,7 +244,7 @@ namespace "default" { // Create a job. 
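The command/job_run.go hunk above is where the new plumbing becomes user-visible: JobGetter.Get now returns the original submission alongside the parsed job, and the register options carry it to the server. Below is a minimal sketch of that flow in isolation; it assumes the api package's RegisterOpts entry point and elides how the job and submission are produced (in the CLI that is JobGetter.Get), so treat it as illustrative rather than a copy of the command code.

```go
package sketch

import "github.com/hashicorp/nomad/api"

// registerWithSource registers a job and attaches its original submission so
// the server can keep the source material. Passing a nil submission keeps the
// old behavior.
func registerWithSource(client *api.Client, job *api.Job, sub *api.JobSubmission) error {
	opts := &api.RegisterOptions{
		Submission: sub,
	}
	_, _, err := client.Jobs().RegisterOpts(job, opts, nil)
	return err
}
```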
job := mock.MinJob() state := srv.Agent.Server().State() - err := state.UpsertJob(structs.MsgTypeTestSetup, uint64(300+i), job) + err := state.UpsertJob(structs.MsgTypeTestSetup, uint64(300+i), nil, job) must.NoError(t, err) defer func() { client.Jobs().Deregister(job.ID, true, &api.WriteOptions{ diff --git a/command/job_scaling_events_test.go b/command/job_scaling_events_test.go index 471b2b63e..1c6b195b5 100644 --- a/command/job_scaling_events_test.go +++ b/command/job_scaling_events_test.go @@ -110,7 +110,7 @@ func TestJobScalingEventsCommand_ACL(t *testing.T) { // Create a job. job := mock.MinJob() state := srv.Agent.Server().State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) must.NoError(t, err) testCases := []struct { diff --git a/command/job_status_test.go b/command/job_status_test.go index 28fb7c655..a3b979297 100644 --- a/command/job_status_test.go +++ b/command/job_status_test.go @@ -265,7 +265,7 @@ func TestJobStatusCommand_AutocompleteArgs(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() j := mock.Job() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, j)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) prefix := j.ID[:len(j.ID)-5] args := complete.Args{Last: prefix} @@ -372,7 +372,7 @@ func TestJobStatusCommand_RescheduleEvals(t *testing.T) { // Create state store objects for job, alloc and followup eval with a future WaitUntil value j := mock.Job() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 900, j)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 900, nil, j)) e := mock.Eval() e.WaitUntil = time.Now().Add(1 * time.Hour) @@ -408,7 +408,7 @@ func TestJobStatusCommand_ACL(t *testing.T) { // Create a job. job := mock.MinJob() state := srv.Agent.Server().State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) must.NoError(t, err) testCases := []struct { diff --git a/command/job_stop_test.go b/command/job_stop_test.go index b4b8ccb67..c6a9e0e52 100644 --- a/command/job_stop_test.go +++ b/command/job_stop_test.go @@ -142,7 +142,7 @@ func TestStopCommand_AutocompleteArgs(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() j := mock.Job() - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, j)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) prefix := j.ID[:len(j.ID)-5] args := complete.Args{Last: prefix} @@ -232,7 +232,7 @@ namespace "default" { // Create a job. 
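Almost all of the test churn in this part of the diff comes from one signature change: the state store's UpsertJob now takes the job submission as an extra argument, and existing tests simply pass nil. The sketch below shows both call shapes, assuming the usual nomad/state and nomad/mock test helpers; the JobSubmission fields are deliberately left unfilled because they are not shown in these hunks.

```go
package sketch

import (
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
)

// upsertJobExamples shows the updated UpsertJob call shape: the third argument
// is the optional *structs.JobSubmission.
func upsertJobExamples(store *state.StateStore) error {
	job := mock.Job()

	// No source material retained; the pattern used by the tests above.
	if err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil {
		return err
	}

	// The same job re-upserted with its original source attached.
	sub := &structs.JobSubmission{ /* original spec text, format, variables */ }
	return store.UpsertJob(structs.MsgTypeTestSetup, 1001, sub, job.Copy())
}
```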
job := mock.MinJob() state := srv.Agent.Server().State() - err := state.UpsertJob(structs.MsgTypeTestSetup, uint64(300+i), job) + err := state.UpsertJob(structs.MsgTypeTestSetup, uint64(300+i), nil, job) must.NoError(t, err) defer func() { client.Jobs().Deregister(job.ID, true, &api.WriteOptions{ diff --git a/command/job_validate.go b/command/job_validate.go index 658cda97f..f7f4a399a 100644 --- a/command/job_validate.go +++ b/command/job_validate.go @@ -144,7 +144,7 @@ func (c *JobValidateCommand) Run(args []string) int { } // Get Job struct from Jobfile - job, err := c.JobGetter.Get(args[0]) + _, job, err := c.JobGetter.Get(args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err)) return 1 diff --git a/command/status_test.go b/command/status_test.go index 8dffa5cd1..65da077f6 100644 --- a/command/status_test.go +++ b/command/status_test.go @@ -32,7 +32,7 @@ func TestStatusCommand_Run_JobStatus(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() j := mock.Job() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, j)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) // Query to check the job status if code := cmd.Run([]string{"-address=" + url, j.ID}); code != 0 { @@ -60,8 +60,8 @@ func TestStatusCommand_Run_JobStatus_MultiMatch(t *testing.T) { j := mock.Job() j2 := mock.Job() j2.ID = fmt.Sprintf("%s-more", j.ID) - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, j)) - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1001, j2)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, j2)) // Query to check the job status if code := cmd.Run([]string{"-address=" + url, j.ID}); code != 0 { @@ -204,7 +204,7 @@ func TestStatusCommand_Run_NoPrefix(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() job := mock.Job() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, job)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) // Query to check status if code := cmd.Run([]string{"-address=" + url}); code != 0 { @@ -230,7 +230,7 @@ func TestStatusCommand_AutocompleteArgs(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() job := mock.Job() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, job)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) prefix := job.ID[:len(job.ID)-5] args := complete.Args{Last: prefix} diff --git a/jobspec2/parse.go b/jobspec2/parse.go index 0a098608a..4b91ba76f 100644 --- a/jobspec2/parse.go +++ b/jobspec2/parse.go @@ -59,15 +59,20 @@ type ParseConfig struct { // Body is the HCL body Body []byte - // AllowFS enables HCL functions that require file system accecss + // AllowFS enables HCL functions that require file system access AllowFS bool // ArgVars is the CLI -var arguments ArgVars []string - // VarFiles is the paths of variable data files + // VarFiles is the paths of variable data files that should be read during + // parsing. VarFiles []string + // VarContent is the content of variable data known without reading an + // actual var file during parsing. + VarContent string + // Envs represent process environment variable Envs []string @@ -98,6 +103,14 @@ func decode(c *jobConfig) error { diags = append(diags, ds...) 
} + if config.VarContent != "" { + hclFile, hclDiagnostics := parseHCLOrJSON([]byte(config.VarContent), "input.hcl") + if hclDiagnostics.HasErrors() { + return fmt.Errorf("unable to parse var content: %v", hclDiagnostics.Error()) + } + config.parsedVarFiles = append(config.parsedVarFiles, hclFile) + } + // Return early if the input job or variable files are not valid. // Decoding and evaluating invalid files may result in unexpected results. if diags.HasErrors() { diff --git a/nomad/client_alloc_endpoint_test.go b/nomad/client_alloc_endpoint_test.go index b4053ab9d..0bb91171a 100644 --- a/nomad/client_alloc_endpoint_test.go +++ b/nomad/client_alloc_endpoint_test.go @@ -327,7 +327,7 @@ func TestClientAllocations_GarbageCollect_Local(t *testing.T) { // Upsert the allocation state := s.State() - require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) // Wait for the client to run the allocation @@ -385,7 +385,7 @@ func TestClientAllocations_GarbageCollect_Local_ACL(t *testing.T) { // Upsert the allocation state := s.State() alloc := mock.Alloc() - require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, alloc.Job)) + require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, nil, alloc.Job)) require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*nstructs.Allocation{alloc})) cases := []struct { @@ -495,9 +495,9 @@ func TestClientAllocations_GarbageCollect_Remote(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() - require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) - require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) // Wait for the client to run the allocation @@ -616,7 +616,7 @@ func TestClientAllocations_Stats_Local(t *testing.T) { // Upsert the allocation state := s.State() - require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) // Wait for the client to run the allocation @@ -675,7 +675,7 @@ func TestClientAllocations_Stats_Local_ACL(t *testing.T) { // Upsert the allocation state := s.State() alloc := mock.Alloc() - require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, alloc.Job)) + require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, nil, alloc.Job)) require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*nstructs.Allocation{alloc})) cases := []struct { @@ -772,9 +772,9 @@ func TestClientAllocations_Stats_Remote(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() - require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) - require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job)) + 
require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) // Wait for the client to run the allocation @@ -856,7 +856,7 @@ func TestClientAllocations_Restart_Local(t *testing.T) { // Upsert the allocation state := s.State() - require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) // Wait for the client to run the allocation @@ -970,9 +970,9 @@ func TestClientAllocations_Restart_Remote(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() - require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) - require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) // Wait for the client to run the allocation @@ -1031,7 +1031,7 @@ func TestClientAllocations_Restart_ACL(t *testing.T) { // Upsert the allocation state := s.State() alloc := mock.Alloc() - require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, alloc.Job)) + require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, nil, alloc.Job)) require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*nstructs.Allocation{alloc})) cases := []struct { @@ -1139,10 +1139,10 @@ func TestAlloc_ExecStreaming(t *testing.T) { // Upsert the allocation localState := localServer.State() - require.Nil(t, localState.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(t, localState.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(t, localState.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) remoteState := remoteServer.State() - require.Nil(t, remoteState.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(t, remoteState.UpsertJob(nstructs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(t, remoteState.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a})) // Wait for the client to run the allocation diff --git a/nomad/client_fs_endpoint_test.go b/nomad/client_fs_endpoint_test.go index 981ae7ebd..8074ac7d4 100644 --- a/nomad/client_fs_endpoint_test.go +++ b/nomad/client_fs_endpoint_test.go @@ -68,7 +68,7 @@ func TestClientFS_List_Local(t *testing.T) { // Upsert the allocation state := s.State() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation @@ -128,7 +128,7 @@ func TestClientFS_List_ACL(t *testing.T) { // Upsert the allocation state := s.State() alloc := mock.Alloc() - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, alloc.Job)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) cases := []struct { @@ -228,9 +228,9 @@ func TestClientFS_List_Remote(t *testing.T) { // Upsert the allocation state1 := 
s1.State() state2 := s2.State() - require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) - require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation @@ -344,7 +344,7 @@ func TestClientFS_Stat_Local(t *testing.T) { // Upsert the allocation state := s.State() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation @@ -404,7 +404,7 @@ func TestClientFS_Stat_ACL(t *testing.T) { // Upsert the allocation state := s.State() alloc := mock.Alloc() - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, alloc.Job)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) cases := []struct { @@ -504,9 +504,9 @@ func TestClientFS_Stat_Remote(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() - require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) - require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation @@ -636,7 +636,7 @@ func TestClientFS_Streaming_ACL(t *testing.T) { // Upsert the allocation state := s.State() alloc := mock.Alloc() - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, alloc.Job)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) cases := []struct { @@ -777,7 +777,7 @@ func TestClientFS_Streaming_Local(t *testing.T) { // Upsert the allocation state := s.State() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation @@ -913,7 +913,7 @@ func TestClientFS_Streaming_Local_Follow(t *testing.T) { // Upsert the allocation state := s.State() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation @@ -1056,9 +1056,9 @@ func TestClientFS_Streaming_Remote_Server(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() - require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) 
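Stepping back to the jobspec2/parse.go hunk above: ParseConfig gains a VarContent field, and decode runs it through parseHCLOrJSON just as it would a variables file read from disk. A hedged sketch of using it through the package's existing ParseWithConfig entry point follows; every field other than VarContent is pre-existing jobspec2 API, and the variable assignment shown is only an example value.

```go
package sketch

import (
	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/jobspec2"
)

// parseWithInlineVars hands variable data to the parser as a string via the
// new VarContent field rather than pointing VarFiles at a file on disk.
func parseWithInlineVars(jobHCL []byte) (*api.Job, error) {
	return jobspec2.ParseWithConfig(&jobspec2.ParseConfig{
		Path:       "example.nomad",
		Body:       jobHCL,
		VarContent: `datacenter = "dc1"`, // parsed like the contents of a var file
	})
}
```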
require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) - require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation @@ -1202,7 +1202,7 @@ func TestClientFS_Streaming_Remote_Region(t *testing.T) { // Upsert the allocation state2 := s2.State() - require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation @@ -1465,7 +1465,7 @@ func TestClientFS_Logs_ACL(t *testing.T) { // Upsert the allocation state := s.State() alloc := mock.Alloc() - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, alloc.Job)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) cases := []struct { @@ -1606,7 +1606,7 @@ func TestClientFS_Logs_Local(t *testing.T) { // Upsert the allocation state := s.State() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation @@ -1743,7 +1743,7 @@ func TestClientFS_Logs_Local_Follow(t *testing.T) { // Upsert the allocation state := s.State() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation @@ -1887,9 +1887,9 @@ func TestClientFS_Logs_Remote_Server(t *testing.T) { // Upsert the allocation state1 := s1.State() state2 := s2.State() - require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state1.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state1.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) - require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation @@ -2034,7 +2034,7 @@ func TestClientFS_Logs_Remote_Region(t *testing.T) { // Upsert the allocation state2 := s2.State() - require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, a.Job)) + require.Nil(state2.UpsertJob(structs.MsgTypeTestSetup, 999, nil, a.Job)) require.Nil(state2.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a})) // Wait for the client to run the allocation diff --git a/nomad/config.go b/nomad/config.go index adbca4bb9..10912d37f 100644 --- a/nomad/config.go +++ b/nomad/config.go @@ -67,6 +67,11 @@ type Config struct { // EventBufferSize is the amount of events to hold in memory. EventBufferSize int64 + // JobMaxSourceSize limits the maximum size of a job's source HCL/JSON + // before it is discarded automatically. A value of zero indicates no job + // sources will be stored. + JobMaxSourceSize int + // LogOutput is the location to write logs to. 
If this is not set, // logs will go to stderr. LogOutput io.Writer diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index a574ea5d4..0008ab8f3 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -47,7 +47,7 @@ func TestCoreScheduler_EvalGC(t *testing.T) { Attempts: 0, Interval: 0 * time.Second, } - err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, job) + err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job) require.Nil(t, err) // Insert "dead" alloc @@ -144,7 +144,7 @@ func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) { job := mock.Job() job.ID = eval.JobID - err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, job) + err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job) require.Nil(t, err) // Insert failed alloc with an old reschedule attempt, can be GCed @@ -242,7 +242,7 @@ func TestCoreScheduler_EvalGC_StoppedJob_Reschedulable(t *testing.T) { job.ID = eval.JobID job.Stop = true - err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, job) + err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job) require.Nil(t, err) // Insert failed alloc with a recent reschedule attempt @@ -321,7 +321,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { Attempts: 0, Interval: 0 * time.Second, } - err := store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx+1, stoppedJob) + err := store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx+1, nil, stoppedJob) must.NoError(t, err) stoppedJobEval := mock.Eval() @@ -355,7 +355,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { deadJob := mock.Job() deadJob.Type = structs.JobTypeBatch deadJob.Status = structs.JobStatusDead - err = store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, deadJob) + err = store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, nil, deadJob) must.NoError(t, err) deadJobEval := mock.Eval() @@ -393,7 +393,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { activeJob := mock.Job() activeJob.Type = structs.JobTypeBatch activeJob.Status = structs.JobStatusDead - err = store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, activeJob) + err = store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, nil, activeJob) must.NoError(t, err) activeJobEval := mock.Eval() @@ -441,7 +441,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { purgedJob := mock.Job() purgedJob.Type = structs.JobTypeBatch purgedJob.Status = structs.JobStatusDead - err = store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, purgedJob) + err = store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, nil, purgedJob) must.NoError(t, err) purgedJobEval := mock.Eval() @@ -673,7 +673,7 @@ func TestCoreScheduler_EvalGC_Partial(t *testing.T) { Attempts: 0, Interval: 0 * time.Second, } - err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, job) + err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job) require.Nil(t, err) // Update the time tables to make this work @@ -764,7 +764,7 @@ func TestCoreScheduler_EvalGC_Force(t *testing.T) { Attempts: 0, Interval: 0 * time.Second, } - err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, job) + err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job) require.Nil(t, err) // Insert "dead" alloc @@ -1043,7 +1043,7 @@ func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) { job := mock.Job() job.Type = structs.JobTypeBatch job.Status = structs.JobStatusDead - err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) if err != nil { t.Fatalf("err: %v", 
err) } @@ -1171,7 +1171,7 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { Attempts: 0, Interval: 0 * time.Second, } - err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -1312,7 +1312,7 @@ func TestCoreScheduler_JobGC_OneShot(t *testing.T) { store := s1.fsm.State() job := mock.Job() job.Type = structs.JobTypeBatch - err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -1429,7 +1429,7 @@ func TestCoreScheduler_JobGC_Stopped(t *testing.T) { Attempts: 0, Interval: 0 * time.Second, } - err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -1534,7 +1534,7 @@ func TestCoreScheduler_JobGC_Force(t *testing.T) { job := mock.Job() job.Type = structs.JobTypeBatch job.Status = structs.JobStatusDead - err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -1602,7 +1602,7 @@ func TestCoreScheduler_JobGC_Parameterized(t *testing.T) { job.ParameterizedJob = &structs.ParameterizedJobConfig{ Payload: structs.DispatchPayloadRequired, } - err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -1634,7 +1634,7 @@ func TestCoreScheduler_JobGC_Parameterized(t *testing.T) { // Mark the job as stopped and try again job2 := job.Copy() job2.Stop = true - err = store.UpsertJob(structs.MsgTypeTestSetup, 2000, job2) + err = store.UpsertJob(structs.MsgTypeTestSetup, 2000, nil, job2) if err != nil { t.Fatalf("err: %v", err) } @@ -1677,7 +1677,7 @@ func TestCoreScheduler_JobGC_Periodic(t *testing.T) { // Insert a parameterized job. 
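The nomad/config.go hunk a little earlier adds JobMaxSourceSize to the server Config. A small sketch of setting it is below, assuming the package's existing DefaultConfig constructor; the 1 MiB figure is only an example, not the shipped default.

```go
package sketch

import "github.com/hashicorp/nomad/nomad"

// sourceLimitedConfig caps how large a stored job source may be. Source
// material bigger than this is dropped rather than stored, and a value of 0
// stores no job source at all.
func sourceLimitedConfig() *nomad.Config {
	cfg := nomad.DefaultConfig()
	cfg.JobMaxSourceSize = 1 << 20 // bytes
	return cfg
}
```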
store := s1.fsm.State() job := mock.PeriodicJob() - err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -1709,7 +1709,7 @@ func TestCoreScheduler_JobGC_Periodic(t *testing.T) { // Mark the job as stopped and try again job2 := job.Copy() job2.Stop = true - err = store.UpsertJob(structs.MsgTypeTestSetup, 2000, job2) + err = store.UpsertJob(structs.MsgTypeTestSetup, 2000, nil, job2) if err != nil { t.Fatalf("err: %v", err) } @@ -2314,7 +2314,7 @@ func TestCoreScheduler_CSIVolumeClaimGC(t *testing.T) { job.ID = eval.JobID job.Status = structs.JobStatusRunning index++ - err = store.UpsertJob(structs.MsgTypeTestSetup, index, job) + err = store.UpsertJob(structs.MsgTypeTestSetup, index, nil, job) require.NoError(t, err) alloc1, alloc2 := mock.Alloc(), mock.Alloc() diff --git a/nomad/deployment_endpoint_test.go b/nomad/deployment_endpoint_test.go index b4736a928..94551e44a 100644 --- a/nomad/deployment_endpoint_test.go +++ b/nomad/deployment_endpoint_test.go @@ -35,7 +35,7 @@ func TestDeploymentEndpoint_GetDeployment(t *testing.T) { d.JobID = j.ID state := s1.fsm.State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Lookup the deployments @@ -67,7 +67,7 @@ func TestDeploymentEndpoint_GetDeployment_ACL(t *testing.T) { d.JobID = j.ID state := s1.fsm.State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Create the namespace policy and tokens @@ -124,8 +124,8 @@ func TestDeploymentEndpoint_GetDeployment_Blocking(t *testing.T) { d2 := mock.Deployment() d2.JobID = j2.ID - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 98, j1), "UpsertJob") - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 99, j2), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 98, nil, j1), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 99, nil, j2), "UpsertJob") // Upsert a deployment we are not interested in first. 
time.AfterFunc(100*time.Millisecond, func() { @@ -173,7 +173,7 @@ func TestDeploymentEndpoint_Fail(t *testing.T) { d.JobID = j.ID state := s1.fsm.State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Mark the deployment as failed @@ -223,7 +223,7 @@ func TestDeploymentEndpoint_Fail_ACL(t *testing.T) { d.JobID = j.ID state := s1.fsm.State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Create the namespace policy and tokens @@ -300,7 +300,7 @@ func TestDeploymentEndpoint_Fail_Rollback(t *testing.T) { j.TaskGroups[0].Update = structs.DefaultUpdateStrategy.Copy() j.TaskGroups[0].Update.MaxParallel = 2 j.TaskGroups[0].Update.AutoRevert = true - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") // Create the second job, deployment and alloc j2 := j.Copy() @@ -317,7 +317,7 @@ func TestDeploymentEndpoint_Fail_Rollback(t *testing.T) { a.JobID = j.ID a.DeploymentID = d.ID - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j2), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j2), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") @@ -377,7 +377,7 @@ func TestDeploymentEndpoint_Pause(t *testing.T) { d.JobID = j.ID state := s1.fsm.State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Mark the deployment as failed @@ -420,7 +420,7 @@ func TestDeploymentEndpoint_Pause_ACL(t *testing.T) { d.JobID = j.ID state := s1.fsm.State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Create the namespace policy and tokens @@ -499,7 +499,7 @@ func TestDeploymentEndpoint_Promote(t *testing.T) { } state := s1.fsm.State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") @@ -564,7 +564,7 @@ func TestDeploymentEndpoint_Promote_ACL(t *testing.T) { } state := s1.fsm.State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") @@ -650,7 +650,7 @@ func TestDeploymentEndpoint_SetAllocHealth(t *testing.T) { a.DeploymentID = d.ID state := s1.fsm.State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), 
"UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") @@ -718,7 +718,7 @@ func TestDeploymentEndpoint_SetAllocHealth_ACL(t *testing.T) { a.DeploymentID = d.ID state := s1.fsm.State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") @@ -808,7 +808,7 @@ func TestDeploymentEndpoint_SetAllocHealth_Rollback(t *testing.T) { j.TaskGroups[0].Update = structs.DefaultUpdateStrategy.Copy() j.TaskGroups[0].Update.MaxParallel = 2 j.TaskGroups[0].Update.AutoRevert = true - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") // Create the second job, deployment and alloc j2 := j.Copy() @@ -824,7 +824,7 @@ func TestDeploymentEndpoint_SetAllocHealth_Rollback(t *testing.T) { a.JobID = j.ID a.DeploymentID = d.ID - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j2), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j2), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") @@ -898,7 +898,7 @@ func TestDeploymentEndpoint_SetAllocHealth_NoRollback(t *testing.T) { j.TaskGroups[0].Update = structs.DefaultUpdateStrategy.Copy() j.TaskGroups[0].Update.MaxParallel = 2 j.TaskGroups[0].Update.AutoRevert = true - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") // Create the second job, deployment and alloc. 
Job has same spec as original j2 := j.Copy() @@ -913,7 +913,7 @@ func TestDeploymentEndpoint_SetAllocHealth_NoRollback(t *testing.T) { a.JobID = j.ID a.DeploymentID = d.ID - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j2), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j2), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") @@ -982,7 +982,7 @@ func TestDeploymentEndpoint_List(t *testing.T) { d.JobID = j.ID state := s1.fsm.State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Lookup the deployments @@ -1021,7 +1021,7 @@ func TestDeploymentEndpoint_List(t *testing.T) { d2.Namespace = "prod" d2.JobID = j2.ID assert.Nil(state.UpsertNamespaces(1001, []*structs.Namespace{{Name: "prod"}})) - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1002, j2), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1002, nil, j2), "UpsertJob") assert.Nil(state.UpsertDeployment(1003, d2), "UpsertDeployment") // Lookup the deployments with wildcard namespace @@ -1139,7 +1139,7 @@ func TestDeploymentEndpoint_List_ACL(t *testing.T) { d.JobID = j.ID state := s1.fsm.State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") // Create the namespace policy and tokens @@ -1208,7 +1208,7 @@ func TestDeploymentEndpoint_List_Blocking(t *testing.T) { d := mock.Deployment() d.JobID = j.ID - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j), "UpsertJob") // Upsert alloc triggers watches time.AfterFunc(100*time.Millisecond, func() { @@ -1485,7 +1485,7 @@ func TestDeploymentEndpoint_Allocations(t *testing.T) { summary := mock.JobSummary(a.JobID) state := s1.fsm.State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") assert.Nil(state.UpsertJobSummary(999, summary), "UpsertJobSummary") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") @@ -1523,7 +1523,7 @@ func TestDeploymentEndpoint_Allocations_ACL(t *testing.T) { summary := mock.JobSummary(a.JobID) state := s1.fsm.State() - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, j), "UpsertJob") assert.Nil(state.UpsertJobSummary(999, summary), "UpsertJobSummary") assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{a}), "UpsertAllocs") @@ -1598,7 +1598,7 @@ func TestDeploymentEndpoint_Allocations_Blocking(t *testing.T) { a.DeploymentID = d.ID summary := mock.JobSummary(a.JobID) - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1, j), "UpsertJob") + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, j), "UpsertJob") assert.Nil(state.UpsertDeployment(2, d), "UpsertDeployment") assert.Nil(state.UpsertJobSummary(3, summary), 
"UpsertJobSummary") diff --git a/nomad/deploymentwatcher/deployments_watcher_test.go b/nomad/deploymentwatcher/deployments_watcher_test.go index f4bf19106..4e18187ba 100644 --- a/nomad/deploymentwatcher/deployments_watcher_test.go +++ b/nomad/deploymentwatcher/deployments_watcher_test.go @@ -43,9 +43,9 @@ func TestWatcher_WatchDeployments(t *testing.T) { // Create three jobs j1, j2, j3 := mock.Job(), mock.Job(), mock.Job() - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, 100, j1)) - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, 101, j2)) - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, 102, j3)) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, j1)) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, j2)) + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, 102, nil, j3)) // Create three deployments all running d1, d2, d3 := mock.Deployment(), mock.Deployment(), mock.Deployment() @@ -158,7 +158,7 @@ func TestWatcher_SetAllocHealth_Unknown(t *testing.T) { j := mock.Job() d := mock.Deployment() d.JobID = j.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") // require that we get a call to UpsertDeploymentAllocHealth @@ -204,7 +204,7 @@ func TestWatcher_SetAllocHealth_Healthy(t *testing.T) { d.JobID = j.ID a := mock.Alloc() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") @@ -249,7 +249,7 @@ func TestWatcher_SetAllocHealth_Unhealthy(t *testing.T) { d.JobID = j.ID a := mock.Alloc() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") @@ -307,7 +307,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { d.TaskGroups["web"].AutoRevert = true a := mock.Alloc() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") @@ -317,7 +317,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { // Modify the job to make its specification different j2.Meta["foo"] = "bar" - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j2), "UpsertJob2") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob2") // require that we get a call to UpsertDeploymentAllocHealth matchConfig := &matchDeploymentAllocHealthRequestConfig{ @@ -374,7 +374,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_NoRollback(t *testing.T) { 
d.TaskGroups["web"].AutoRevert = true a := mock.Alloc() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") @@ -382,7 +382,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_NoRollback(t *testing.T) { j2 := j.Copy() j2.Stable = false - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j2), "UpsertJob2") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob2") // require that we get a call to UpsertDeploymentAllocHealth matchConfig := &matchDeploymentAllocHealthRequestConfig{ @@ -442,7 +442,7 @@ func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { Healthy: pointer.Of(true), } a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") @@ -499,7 +499,7 @@ func TestWatcher_PromoteDeployment_UnhealthyCanaries(t *testing.T) { d.TaskGroups[a.TaskGroup].PlacedCanaries = []string{a.ID} d.TaskGroups[a.TaskGroup].DesiredCanaries = 2 a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") @@ -610,7 +610,7 @@ func TestWatcher_AutoPromoteDeployment(t *testing.T) { d.TaskGroups[ca1.TaskGroup].PlacedCanaries = []string{ca1.ID, ca2.ID} d.TaskGroups[ca1.TaskGroup].DesiredCanaries = 2 d.TaskGroups[ra1.TaskGroup].PlacedAllocs = 2 - require.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.NoError(t, m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") require.NoError(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{ca1, ca2, ra1, ra2}), "UpsertAllocs") @@ -745,7 +745,7 @@ func TestWatcher_AutoPromoteDeployment_UnhealthyCanaries(t *testing.T) { d.TaskGroups[ca1.TaskGroup].PlacedCanaries = []string{ca1.ID, ca2.ID, ca3.ID} d.TaskGroups[ca1.TaskGroup].DesiredCanaries = 2 - require.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.NoError(t, m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.NoError(t, m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") require.NoError(t, m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{ca1, ca2, ca3}), "UpsertAllocs") @@ -848,7 +848,7 @@ func TestWatcher_PauseDeployment_Pause_Running(t *testing.T) { j := mock.Job() d := mock.Deployment() d.JobID = j.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + 
require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") // require that we get a call to UpsertDeploymentStatusUpdate @@ -891,7 +891,7 @@ func TestWatcher_PauseDeployment_Pause_Paused(t *testing.T) { d := mock.Deployment() d.JobID = j.ID d.Status = structs.DeploymentStatusPaused - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") // require that we get a call to UpsertDeploymentStatusUpdate @@ -931,7 +931,7 @@ func TestWatcher_PauseDeployment_Unpause_Paused(t *testing.T) { d := mock.Deployment() d.JobID = j.ID d.Status = structs.DeploymentStatusPaused - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") // require that we get a call to UpsertDeploymentStatusUpdate @@ -971,7 +971,7 @@ func TestWatcher_PauseDeployment_Unpause_Running(t *testing.T) { j := mock.Job() d := mock.Deployment() d.JobID = j.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") // require that we get a call to UpsertDeploymentStatusUpdate @@ -1011,7 +1011,7 @@ func TestWatcher_FailDeployment_Running(t *testing.T) { j := mock.Job() d := mock.Deployment() d.JobID = j.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") // require that we get a call to UpsertDeploymentStatusUpdate @@ -1059,7 +1059,7 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { d.TaskGroups["web"].AutoRevert = true a := mock.Alloc() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") @@ -1068,7 +1068,7 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { // Modify the job to make its specification different j2.Meta["foo"] = "bar" j2.Stable = false - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j2), "UpsertJob2") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob2") // require that we will get a update allocation call only once. 
This will // verify that the watcher is batching allocation changes @@ -1181,7 +1181,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { a.CreateTime = now.UnixNano() a.ModifyTime = now.UnixNano() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") @@ -1276,7 +1276,7 @@ func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { a2.ModifyTime = now.UnixNano() a2.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a, a2}), "UpsertAllocs") @@ -1370,7 +1370,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { a.CreateTime = now.UnixNano() a.ModifyTime = now.UnixNano() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") @@ -1462,7 +1462,7 @@ func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { Healthy: pointer.Of(true), Timestamp: now, } - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") @@ -1562,7 +1562,7 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { }, } - require.NoError(m.state.UpsertJob(mtype, m.nextIndex(), j)) + require.NoError(m.state.UpsertJob(mtype, m.nextIndex(), nil, j)) require.NoError(m.state.UpsertDeployment(m.nextIndex(), d)) // require that we get a call to UpsertDeploymentPromotion @@ -1741,7 +1741,7 @@ func TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) { d := mock.Deployment() d.JobID = j.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") a := mock.Alloc() @@ -1802,7 +1802,7 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) { d.TaskGroups["web"].AutoRevert = true a := mock.Alloc() a.DeploymentID = d.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d), "UpsertDeployment") require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a}), "UpsertAllocs") @@ -1810,7 +1810,7 @@ func 
TestDeploymentWatcher_RollbackFailed(t *testing.T) { j2 := j.Copy() // Modify the job to make its specification different j2.Stable = false - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j2), "UpsertJob2") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob2") // require that we will get a createEvaluation call only once. This will // verify that the watcher is batching allocation changes @@ -1925,8 +1925,8 @@ func TestWatcher_BatchAllocUpdates(t *testing.T) { a2.JobID = j2.ID a2.DeploymentID = d2.ID - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j1), "UpsertJob") - require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j2), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j1), "UpsertJob") + require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), nil, j2), "UpsertJob") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d1), "UpsertDeployment") require.Nil(m.state.UpsertDeployment(m.nextIndex(), d2), "UpsertDeployment") require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a1}), "UpsertAllocs") diff --git a/nomad/deploymentwatcher/testutil_test.go b/nomad/deploymentwatcher/testutil_test.go index f38f18c7b..15f05c707 100644 --- a/nomad/deploymentwatcher/testutil_test.go +++ b/nomad/deploymentwatcher/testutil_test.go @@ -92,7 +92,7 @@ func matchUpdateAllocDesiredTransitionReschedule(allocIDs []string) func(update func (m *mockBackend) UpsertJob(job *structs.Job) (uint64, error) { m.Called(job) i := m.nextIndex() - return i, m.state.UpsertJob(structs.MsgTypeTestSetup, i, job) + return i, m.state.UpsertJob(structs.MsgTypeTestSetup, i, nil, job) } func (m *mockBackend) UpdateDeploymentStatus(u *structs.DeploymentStatusUpdateRequest) (uint64, error) { diff --git a/nomad/drainer/draining_node_test.go b/nomad/drainer/draining_node_test.go index f33cc79ff..efbe260ab 100644 --- a/nomad/drainer/draining_node_test.go +++ b/nomad/drainer/draining_node_test.go @@ -72,7 +72,7 @@ func TestDrainingNode_Table(t *testing.T) { setup: func(t *testing.T, dn *drainingNode) { alloc := mock.BatchAlloc() alloc.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, alloc.Job)) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc})) }, }, @@ -84,7 +84,7 @@ func TestDrainingNode_Table(t *testing.T) { setup: func(t *testing.T, dn *drainingNode) { alloc := mock.Alloc() alloc.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, alloc.Job)) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc})) }, }, @@ -96,7 +96,7 @@ func TestDrainingNode_Table(t *testing.T) { setup: func(t *testing.T, dn *drainingNode) { alloc := mock.SystemAlloc() alloc.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, alloc.Job)) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, alloc.Job)) require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, []*structs.Allocation{alloc})) }, }, @@ -109,7 +109,7 @@ func TestDrainingNode_Table(t *testing.T) { allocs := []*structs.Allocation{mock.Alloc(), mock.BatchAlloc(), mock.SystemAlloc()} for _, a := range allocs { 
a.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, a.Job)) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) @@ -131,7 +131,7 @@ func TestDrainingNode_Table(t *testing.T) { allocs := []*structs.Allocation{mock.Alloc(), mock.BatchAlloc(), mock.SystemAlloc()} for _, a := range allocs { a.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, a.Job)) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) @@ -149,7 +149,7 @@ func TestDrainingNode_Table(t *testing.T) { allocs := []*structs.Allocation{mock.Alloc(), mock.BatchAlloc(), mock.SystemAlloc()} for _, a := range allocs { a.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, a.Job)) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) @@ -168,7 +168,7 @@ func TestDrainingNode_Table(t *testing.T) { allocs := []*structs.Allocation{mock.Alloc(), mock.BatchAlloc(), mock.SystemAlloc()} for _, a := range allocs { a.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, a.Job)) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) @@ -194,7 +194,7 @@ func TestDrainingNode_Table(t *testing.T) { } for _, a := range allocs { a.NodeID = dn.node.ID - require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, a.Job)) + require.Nil(t, dn.state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, a.Job)) } require.Nil(t, dn.state.UpsertAllocs(structs.MsgTypeTestSetup, 102, allocs)) diff --git a/nomad/drainer/watch_jobs_test.go b/nomad/drainer/watch_jobs_test.go index 0fc76bfd1..7029eb114 100644 --- a/nomad/drainer/watch_jobs_test.go +++ b/nomad/drainer/watch_jobs_test.go @@ -134,7 +134,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { jnss[i] = structs.NamespacedID{Namespace: job.Namespace, ID: job.ID} job.TaskGroups[0].Migrate.MaxParallel = 3 job.TaskGroups[0].Count = count - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, index, job)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, index, nil, job)) index++ var allocs []*structs.Allocation @@ -571,7 +571,7 @@ func testHandleTaskGroup(t *testing.T, tc handleTaskGroupTestCase) { if tc.MaxParallel > 0 { job.TaskGroups[0].Migrate.MaxParallel = tc.MaxParallel } - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 102, job)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 102, nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -623,7 +623,7 @@ func TestHandleTaskGroup_Migrations(t *testing.T) { require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 100, n)) job := mock.Job() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 101, job)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, job)) // Create 10 done allocs var allocs []*structs.Allocation @@ -692,7 +692,7 @@ func TestHandleTaskGroup_GarbageCollectedNode(t *testing.T) { require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 100, n)) job := mock.Job() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 101, job)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, job)) // Create 10 done allocs var allocs 
[]*structs.Allocation diff --git a/nomad/drainer/watch_nodes_test.go b/nomad/drainer/watch_nodes_test.go index cd2b53516..71a9b9df1 100644 --- a/nomad/drainer/watch_nodes_test.go +++ b/nomad/drainer/watch_nodes_test.go @@ -37,7 +37,7 @@ func TestNodeDrainWatcher_AddNodes(t *testing.T) { // Create a job with a running alloc on each node job := mock.Job() jobID := structs.NamespacedID{Namespace: job.Namespace, ID: job.ID} - must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, 101, job)) + must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, 101, nil, job)) alloc1 := mock.Alloc() alloc1.JobID = job.ID @@ -211,7 +211,7 @@ func testNodeDrainWatcherSetup( job := mock.Job() jobID := structs.NamespacedID{Namespace: job.Namespace, ID: job.ID} index++ - must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, index, job)) + must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, index, nil, job)) // Create draining nodes, each with its own alloc for the job running on that node node := mock.Node() diff --git a/nomad/eval_endpoint_test.go b/nomad/eval_endpoint_test.go index 8b3fa2649..14c9570e4 100644 --- a/nomad/eval_endpoint_test.go +++ b/nomad/eval_endpoint_test.go @@ -381,7 +381,7 @@ func TestEvalEndpoint_Dequeue_UpdateWaitIndex(t *testing.T) { state := s1.fsm.State() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job); err != nil { t.Fatalf("err: %v", err) } @@ -910,7 +910,7 @@ func TestEvalEndpoint_Delete(t *testing.T) { job.ID = "notsafetodelete" job.Status = structs.JobStatusRunning index++ - must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, index, job)) + must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, index, nil, job)) evalsNotSafeToDelete := []*structs.Evaluation{} for i := 0; i < 3; i++ { diff --git a/nomad/fsm.go b/nomad/fsm.go index f8ec2084b..b44293181 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -547,7 +547,7 @@ func (n *nomadFSM) applyUpsertJob(msgType structs.MessageType, buf []byte, index */ req.Job.Canonicalize() - if err := n.state.UpsertJob(msgType, index, req.Job); err != nil { + if err := n.state.UpsertJob(msgType, index, req.Submission, req.Job); err != nil { n.logger.Error("UpsertJob failed", "error", err) return err } @@ -786,7 +786,7 @@ func (n *nomadFSM) handleJobDeregister(index uint64, jobID, namespace string, pu stopped := current.Copy() stopped.Stop = true - if err := n.state.UpsertJobTxn(index, stopped, tx); err != nil { + if err := n.state.UpsertJobTxn(index, nil, stopped, tx); err != nil { return fmt.Errorf("UpsertJob failed: %w", err) } } diff --git a/nomad/fsm_test.go b/nomad/fsm_test.go index 4d67df168..037b225e1 100644 --- a/nomad/fsm_test.go +++ b/nomad/fsm_test.go @@ -1766,7 +1766,7 @@ func TestFSM_JobStabilityUpdate(t *testing.T) { // Upsert a deployment job := mock.Job() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, job); err != nil { t.Fatalf("bad: %v", err) } @@ -1811,7 +1811,7 @@ func TestFSM_DeploymentPromotion(t *testing.T) { tg2 := tg1.Copy() tg2.Name = "foo" j.TaskGroups = append(j.TaskGroups, tg2) - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, j); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, j); err != nil { t.Fatalf("bad: %v", err) } @@ -2259,9 +2259,9 @@ func TestFSM_SnapshotRestore_Jobs(t *testing.T) { fsm := testFSM(t) state := fsm.State() job1 := mock.Job() - 
state.UpsertJob(structs.MsgTypeTestSetup, 1000, job1) + state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job1) job2 := mock.Job() - state.UpsertJob(structs.MsgTypeTestSetup, 1001, job2) + state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2) // Verify the contents ws := memdb.NewWatchSet() @@ -2439,12 +2439,12 @@ func TestFSM_SnapshotRestore_JobSummary(t *testing.T) { state := fsm.State() job1 := mock.Job() - state.UpsertJob(structs.MsgTypeTestSetup, 1000, job1) + state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job1) ws := memdb.NewWatchSet() js1, _ := state.JobSummaryByID(ws, job1.Namespace, job1.ID) job2 := mock.Job() - state.UpsertJob(structs.MsgTypeTestSetup, 1001, job2) + state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2) js2, _ := state.JobSummaryByID(ws, job2.Namespace, job2.ID) // Verify the contents @@ -2489,10 +2489,10 @@ func TestFSM_SnapshotRestore_JobVersions(t *testing.T) { fsm := testFSM(t) state := fsm.State() job1 := mock.Job() - state.UpsertJob(structs.MsgTypeTestSetup, 1000, job1) + state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job1) job2 := mock.Job() job2.ID = job1.ID - state.UpsertJob(structs.MsgTypeTestSetup, 1001, job2) + state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2) // Verify the contents ws := memdb.NewWatchSet() @@ -2523,7 +2523,7 @@ func TestFSM_SnapshotRestore_Deployments(t *testing.T) { d1.JobID = j.ID d2.JobID = j.ID - state.UpsertJob(structs.MsgTypeTestSetup, 999, j) + state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, j) state.UpsertDeployment(1000, d1) state.UpsertDeployment(1001, d2) @@ -2755,12 +2755,12 @@ func TestFSM_ReconcileSummaries(t *testing.T) { // Make a job so that none of the tasks can be placed job1 := mock.Job() job1.TaskGroups[0].Tasks[0].Resources.CPU = 5000 - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, job1)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job1)) // make a job which can make partial progress alloc := mock.Alloc() alloc.NodeID = node.ID - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, alloc.Job)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, alloc.Job)) require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc})) // Delete the summaries @@ -2841,7 +2841,7 @@ func TestFSM_ReconcileParentJobSummary(t *testing.T) { Payload: "random", } job1.TaskGroups[0].Count = 1 - state.UpsertJob(structs.MsgTypeTestSetup, 1000, job1) + state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job1) // Make a child job childJob := job1.Copy() @@ -2857,7 +2857,7 @@ func TestFSM_ReconcileParentJobSummary(t *testing.T) { alloc.JobID = childJob.ID alloc.ClientStatus = structs.AllocClientStatusRunning - state.UpsertJob(structs.MsgTypeTestSetup, 1010, childJob) + state.UpsertJob(structs.MsgTypeTestSetup, 1010, nil, childJob) state.UpsertAllocs(structs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc}) // Make the summary incorrect in the state store diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go index ce0c9e810..264bea439 100644 --- a/nomad/job_endpoint.go +++ b/nomad/job_endpoint.go @@ -18,7 +18,6 @@ import ( "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-set" - "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/pointer" @@ -114,6 +113,9 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis } args.Job = job + // Run the submission 
controller + warnings = append(warnings, j.submissionController(args)) + // Attach the Nomad token's accessor ID so that deploymentwatcher // can reference the token later nomadACLToken, err := j.srv.ResolveSecretToken(args.AuthToken) @@ -1186,6 +1188,52 @@ func (j *Job) Scale(args *structs.JobScaleRequest, reply *structs.JobRegisterRes return nil } +func (j *Job) GetJobSubmission(args *structs.JobSubmissionRequest, reply *structs.JobSubmissionResponse) error { + authErr := j.srv.Authenticate(j.ctx, args) + if done, err := j.srv.forward("Job.GetJobSubmission", args, args, reply); done { + return err + } + j.srv.MeasureRPCRate("job_submission", structs.RateMetricRead, args) + if authErr != nil { + return structs.ErrPermissionDenied + } + defer metrics.MeasureSince([]string{"nomad", "job", "get_job_submission"}, time.Now()) + + // Check for read-job permissions + if aclObj, err := j.srv.ResolveACL(args); err != nil { + return err + } else if aclObj != nil && !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityReadJob) { + return structs.ErrPermissionDenied + } + + // Setup the blocking query + opts := blockingOptions{ + queryOpts: &args.QueryOptions, + queryMeta: &reply.QueryMeta, + run: func(ws memdb.WatchSet, state *state.StateStore) error { + // Look for the submission + out, err := state.JobSubmission(ws, args.RequestNamespace(), args.JobID, args.Version) + if err != nil { + return err + } + + // Setup the output + reply.Submission = out + if out != nil { + // associate with the index of the job this submission originates from + reply.Index = out.JobModifyIndex + } else { + // if there is no submission context, associate with no index + reply.Index = 0 + } + + // Set the query response + j.srv.setQueryMeta(&reply.QueryMeta) + return nil + }} + return j.srv.blockingRPC(&opts) +} + // GetJob is used to request information about a specific job func (j *Job) GetJob(args *structs.JobSpecificRequest, reply *structs.SingleJobResponse) error { @@ -1757,13 +1805,13 @@ func (j *Job) Plan(args *structs.JobPlanRequest, reply *structs.JobPlanResponse) if oldJob.SpecChanged(args.Job) { // Insert the updated Job into the snapshot updatedIndex = oldJob.JobModifyIndex + 1 - if err := snap.UpsertJob(structs.IgnoreUnknownTypeFlag, updatedIndex, args.Job); err != nil { + if err := snap.UpsertJob(structs.IgnoreUnknownTypeFlag, updatedIndex, nil, args.Job); err != nil { return err } } } else if oldJob == nil { // Insert the updated Job into the snapshot - err := snap.UpsertJob(structs.IgnoreUnknownTypeFlag, 100, args.Job) + err := snap.UpsertJob(structs.IgnoreUnknownTypeFlag, 100, nil, args.Job) if err != nil { return err } diff --git a/nomad/job_endpoint_hooks.go b/nomad/job_endpoint_hooks.go index 61c9175bb..631d280cd 100644 --- a/nomad/job_endpoint_hooks.go +++ b/nomad/job_endpoint_hooks.go @@ -6,6 +6,7 @@ package nomad import ( "fmt" + "github.com/dustin/go-humanize" "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" @@ -335,3 +336,31 @@ func (v *memoryOversubscriptionValidate) Validate(job *structs.Job) (warnings [] return warnings, err } + +// submissionController is used to protect against job source sizes that exceed +// the maximum as set in server config as job_max_source_size +// +// Such jobs will have their source discarded and emit a warning, but the job +// itself will still continue with being registered. 
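The guard described in the comment above compares the combined size of everything the submitter sent against the server's configured limit before the job continues through registration. As a rough illustration of the accounting performed by the implementation that follows (the helper name and package are hypothetical, and only JobSubmission fields that appear elsewhere in this diff are assumed):

```go
package example

import "github.com/hashicorp/nomad/nomad/structs"

// submissionSize is a hypothetical helper mirroring the accounting done by
// submissionController: the raw source, the variables file body, and every
// variable flag key and value all count toward the configured maximum.
func submissionSize(sub *structs.JobSubmission) int {
	if sub == nil {
		return 0
	}
	size := len(sub.Source) + len(sub.Variables)
	for k, v := range sub.VariableFlags {
		size += len(k) + len(v)
	}
	return size
}
```

When this total exceeds the server's JobMaxSourceSize, the submission is discarded and the register call surfaces a warning instead of failing, as the hook's doc comment above states and the warnings append in Register reflects.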
+func (j *Job) submissionController(args *structs.JobRegisterRequest) error { + if args.Submission == nil { + return nil + } + maxSize := j.srv.GetConfig().JobMaxSourceSize + submission := args.Submission + // discard the submission if the source + variables is larger than the maximum + // allowable size as set by client config + totalSize := len(submission.Source) + totalSize += len(submission.Variables) + for key, value := range submission.VariableFlags { + totalSize += len(key) + totalSize += len(value) + } + if totalSize > maxSize { + args.Submission = nil + totalSizeHuman := humanize.Bytes(uint64(totalSize)) + maxSizeHuman := humanize.Bytes(uint64(maxSize)) + return fmt.Errorf("job source size of %s exceeds maximum of %s and will be discarded", totalSizeHuman, maxSizeHuman) + } + return nil +} diff --git a/nomad/job_endpoint_hooks_test.go b/nomad/job_endpoint_hooks_test.go index 645f3b0b1..b6475e14d 100644 --- a/nomad/job_endpoint_hooks_test.go +++ b/nomad/job_endpoint_hooks_test.go @@ -769,3 +769,40 @@ func Test_jobCanonicalizer_Mutate(t *testing.T) { }) } } + +func TestJob_submissionController(t *testing.T) { + ci.Parallel(t) + args := &structs.JobRegisterRequest{ + Submission: &structs.JobSubmission{ + Source: "this is some hcl content", + Format: "hcl2", + Variables: "variables", + }, + } + t.Run("nil", func(t *testing.T) { + j := &Job{srv: &Server{ + config: &Config{JobMaxSourceSize: 1024}, + }} + err := j.submissionController(&structs.JobRegisterRequest{ + Submission: nil, + }) + must.NoError(t, err) + }) + t.Run("under max size", func(t *testing.T) { + j := &Job{srv: &Server{ + config: &Config{JobMaxSourceSize: 1024}, + }} + err := j.submissionController(args) + must.NoError(t, err) + must.NotNil(t, args.Submission) + }) + + t.Run("over max size", func(t *testing.T) { + j := &Job{srv: &Server{ + config: &Config{JobMaxSourceSize: 1}, + }} + err := j.submissionController(args) + must.ErrorContains(t, err, "job source size of 33 B exceeds maximum of 1 B and will be discarded") + must.Nil(t, args.Submission) + }) +} diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index b58d40889..b39e95014 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" @@ -2771,12 +2771,12 @@ func TestJobEndpoint_Revert_ACL(t *testing.T) { // Create the jobs job := mock.Job() - err := state.UpsertJob(structs.MsgTypeTestSetup, 300, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 300, nil, job) require.Nil(err) job2 := job.Copy() job2.Priority = 1 - err = state.UpsertJob(structs.MsgTypeTestSetup, 400, job2) + err = state.UpsertJob(structs.MsgTypeTestSetup, 400, nil, job2) require.Nil(err) // Create revert request and enforcing it be at the current version @@ -2899,7 +2899,7 @@ func TestJobEndpoint_Stable_ACL(t *testing.T) { // Register the job job := mock.Job() - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.Nil(err) // Create stability request @@ -3130,7 +3130,7 @@ func TestJobEndpoint_Evaluate_ACL(t *testing.T) { // Create the job job := mock.Job() - err := state.UpsertJob(structs.MsgTypeTestSetup, 300, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 300, nil, job) require.Nil(err) // Force a re-evaluation @@ -3384,7 +3384,7 @@ func 
TestJobEndpoint_Deregister_ACL(t *testing.T) { // Create and register a job job := mock.Job() - err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) require.Nil(err) // Deregister and purge @@ -3923,8 +3923,8 @@ func TestJobEndpoint_BatchDeregister_ACL(t *testing.T) { // Create and register a job job, job2 := mock.Job(), mock.Job() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 100, job)) - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 101, job2)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, job2)) // Deregister req := &structs.JobBatchDeregisterRequest{ @@ -3993,7 +3993,7 @@ func TestJobEndpoint_Deregister_Priority(t *testing.T) { // Create a job which a custom priority and register this. job := mock.Job() job.Priority = 90 - err := fsmState.UpsertJob(structs.MsgTypeTestSetup, 100, job) + err := fsmState.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) requireAssertion.Nil(err) // Deregister. @@ -4118,7 +4118,7 @@ func TestJobEndpoint_GetJob_ACL(t *testing.T) { // Create the job job := mock.Job() - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.Nil(err) // Lookup the job @@ -4179,14 +4179,14 @@ func TestJobEndpoint_GetJob_Blocking(t *testing.T) { // Upsert a job we are not interested in first. time.AfterFunc(100*time.Millisecond, func() { - if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job1); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job1); err != nil { t.Fatalf("err: %v", err) } }) // Upsert another job later which should trigger the watch. 
time.AfterFunc(200*time.Millisecond, func() { - if err := state.UpsertJob(structs.MsgTypeTestSetup, 200, job2); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 200, nil, job2); err != nil { t.Fatalf("err: %v", err) } }) @@ -4314,6 +4314,208 @@ func TestJobEndpoint_GetJobVersions(t *testing.T) { } } +func TestJobEndpoint_GetJobSubmission(t *testing.T) { + ci.Parallel(t) + + s1, cleanupS1 := TestServer(t, nil) + t.Cleanup(cleanupS1) + + codec := rpcClient(t, s1) + testutil.WaitForLeaders(t, s1.RPC) + + // create a job to register and make queries about + job := mock.Job() + registerRequest := &structs.JobRegisterRequest{ + Job: job, + Submission: &structs.JobSubmission{ + Source: "job \"my-job\" { group \"g1\" {} }", + Format: "hcl2", + VariableFlags: map[string]string{"one": "1"}, + Variables: "two = 2", + }, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: job.Namespace, + }, + } + + // register the job first ime + var registerResponse structs.JobRegisterResponse + err := msgpackrpc.CallWithCodec(codec, "Job.Register", registerRequest, ®isterResponse) + must.NoError(t, err) + indexV0 := registerResponse.Index + + // register the job a second time, creating another version (v0, v1) + // with a new job source file and variables + job.Priority++ // trigger new version + registerRequest.Submission = &structs.JobSubmission{ + Source: "job \"my-job\" { group \"g2\" {} }", + Format: "hcl2", + VariableFlags: map[string]string{"three": "3"}, + Variables: "four = 4", + } + err = msgpackrpc.CallWithCodec(codec, "Job.Register", registerRequest, ®isterResponse) + must.NoError(t, err) + indexV1 := registerResponse.Index + + // lookup the submission for v0 + submissionRequestV0 := &structs.JobSubmissionRequest{ + JobID: job.ID, + Version: 0, + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: job.Namespace, + }, + } + + var submissionResponse structs.JobSubmissionResponse + err = msgpackrpc.CallWithCodec(codec, "Job.GetJobSubmission", submissionRequestV0, &submissionResponse) + must.NoError(t, err) + + sub := submissionResponse.Submission + must.StrContains(t, sub.Source, "g1") + must.Eq(t, "hcl2", sub.Format) + must.Eq(t, map[string]string{"one": "1"}, sub.VariableFlags) + must.Eq(t, "two = 2", sub.Variables) + must.Eq(t, job.Namespace, sub.Namespace) + must.Eq(t, indexV0, sub.JobModifyIndex) + + // lookup the submission for v1 + submissionRequestV1 := &structs.JobSubmissionRequest{ + JobID: job.ID, + Version: 1, + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: job.Namespace, + }, + } + + var submissionResponseV1 structs.JobSubmissionResponse + err = msgpackrpc.CallWithCodec(codec, "Job.GetJobSubmission", submissionRequestV1, &submissionResponseV1) + must.NoError(t, err) + + sub = submissionResponseV1.Submission + must.StrContains(t, sub.Source, "g2") + must.Eq(t, "hcl2", sub.Format) + must.Eq(t, map[string]string{"three": "3"}, sub.VariableFlags) + must.Eq(t, "four = 4", sub.Variables) + must.Eq(t, job.Namespace, sub.Namespace) + must.Eq(t, indexV1, sub.JobModifyIndex) + + // lookup non-existent submission v2 + submissionRequestV2 := &structs.JobSubmissionRequest{ + JobID: job.ID, + Version: 2, + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: job.Namespace, + }, + } + + var submissionResponseV2 structs.JobSubmissionResponse + err = msgpackrpc.CallWithCodec(codec, "Job.GetJobSubmission", submissionRequestV2, &submissionResponseV2) + must.NoError(t, err) + must.Nil(t, submissionResponseV2.Submission) + 
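The RPC test above exercises the full flow; distilled down, the two request shapes this change introduces look roughly like the sketch below. This is illustrative only: the helper name is hypothetical, the field values are placeholders, and only struct fields that appear elsewhere in this diff are used.

```go
package example

import "github.com/hashicorp/nomad/nomad/structs"

// buildSubmissionRequests sketches the write and read paths: a register
// request that optionally carries the original source material, and a query
// for that material by job version via Job.GetJobSubmission.
func buildSubmissionRequests(job *structs.Job, source, varFile string) (*structs.JobRegisterRequest, *structs.JobSubmissionRequest) {
	register := &structs.JobRegisterRequest{
		Job: job,
		Submission: &structs.JobSubmission{
			Source:        source,  // raw jobspec text as submitted
			Format:        "hcl2",  // format of the source; the tests in this diff use "hcl2"
			Variables:     varFile, // contents of a variables file, if any
			VariableFlags: map[string]string{"region": "east"}, // individually passed variables
		},
		WriteRequest: structs.WriteRequest{Region: "global", Namespace: job.Namespace},
	}
	query := &structs.JobSubmissionRequest{
		JobID:        job.ID,
		Version:      0, // submissions are stored per job version
		QueryOptions: structs.QueryOptions{Region: "global", Namespace: job.Namespace},
	}
	return register, query
}
```

As the test shows, the response's Submission field is nil when no source was stored for the requested version; the server signals this with a reply index of 0 rather than an error.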
+ // create a deregister request + deRegisterRequest := &structs.JobDeregisterRequest{ + JobID: job.ID, + Purge: true, // force gc + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: job.Namespace, + }, + } + + var deRegisterResponse structs.JobDeregisterResponse + err = msgpackrpc.CallWithCodec(codec, "Job.Deregister", deRegisterRequest, &deRegisterResponse) + must.NoError(t, err) + + // lookup the submission for v0 again + submissionRequestV0 = &structs.JobSubmissionRequest{ + JobID: job.ID, + Version: 0, + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: job.Namespace, + }, + } + + // should no longer exist + var submissionResponseGone structs.JobSubmissionResponse + err = msgpackrpc.CallWithCodec(codec, "Job.GetJobSubmission", submissionRequestV0, &submissionResponseGone) + must.NoError(t, err) + must.Nil(t, submissionResponseGone.Submission, must.Sprintf("got sub: %#v", submissionResponseGone.Submission)) +} + +func TestJobEndpoint_GetJobSubmission_ACL(t *testing.T) { + ci.Parallel(t) + + s1, root, cleanupS1 := TestACLServer(t, nil) + t.Cleanup(cleanupS1) + + codec := rpcClient(t, s1) + testutil.WaitForLeaders(t, s1.RPC) + + // create a namespace to upsert + namespaceUpsertRequest := &structs.NamespaceUpsertRequest{ + Namespaces: []*structs.Namespace{{ + Name: "hashicorp", + Description: "My Namespace", + }}, + WriteRequest: structs.WriteRequest{ + Region: "global", + AuthToken: root.SecretID, + }, + } + + var namespaceUpsertResponse structs.GenericResponse + err := msgpackrpc.CallWithCodec(codec, "Namespace.UpsertNamespaces", namespaceUpsertRequest, &namespaceUpsertResponse) + must.NoError(t, err) + + // create a job to register and make queries about + job := mock.Job() + job.Namespace = "hashicorp" + registerRequest := &structs.JobRegisterRequest{ + Job: job, + Submission: &structs.JobSubmission{ + Source: "job \"my-job\" { group \"g1\" {} }", + Format: "hcl2", + VariableFlags: map[string]string{"one": "1"}, + Variables: "two = 2", + }, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: job.Namespace, + AuthToken: root.SecretID, + }, + } + + // register the job + var registerResponse structs.JobRegisterResponse + err = msgpackrpc.CallWithCodec(codec, "Job.Register", registerRequest, ®isterResponse) + must.NoError(t, err) + + // make a request with no token set + submissionRequest := &structs.JobSubmissionRequest{ + JobID: job.ID, + Version: 0, + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: job.Namespace, + }, + } + var submissionResponse structs.JobSubmissionResponse + err = msgpackrpc.CallWithCodec(codec, "Job.GetJobSubmission", submissionRequest, &submissionResponse) + must.ErrorContains(t, err, "Permission denied") + + // make request with token set + submissionRequest.QueryOptions.AuthToken = root.SecretID + var submissionResponse2 structs.JobSubmissionResponse + err = msgpackrpc.CallWithCodec(codec, "Job.GetJobSubmission", submissionRequest, &submissionResponse2) + must.NoError(t, err) +} + func TestJobEndpoint_GetJobVersions_ACL(t *testing.T) { ci.Parallel(t) require := require.New(t) @@ -4327,11 +4529,11 @@ func TestJobEndpoint_GetJobVersions_ACL(t *testing.T) { // Create two versions of a job with different priorities job := mock.Job() job.Priority = 88 - err := state.UpsertJob(structs.MsgTypeTestSetup, 10, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 10, nil, job) require.Nil(err) job.Priority = 100 - err = state.UpsertJob(structs.MsgTypeTestSetup, 100, job) + err = 
state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) require.Nil(err) // Lookup the job @@ -4496,14 +4698,14 @@ func TestJobEndpoint_GetJobVersions_Blocking(t *testing.T) { // Upsert a job we are not interested in first. time.AfterFunc(100*time.Millisecond, func() { - if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job1); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job1); err != nil { t.Fatalf("err: %v", err) } }) // Upsert another job later which should trigger the watch. time.AfterFunc(200*time.Millisecond, func() { - if err := state.UpsertJob(structs.MsgTypeTestSetup, 200, job2); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 200, nil, job2); err != nil { t.Fatalf("err: %v", err) } }) @@ -4534,7 +4736,7 @@ func TestJobEndpoint_GetJobVersions_Blocking(t *testing.T) { // Upsert the job again which should trigger the watch. time.AfterFunc(100*time.Millisecond, func() { - if err := state.UpsertJob(structs.MsgTypeTestSetup, 300, job3); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 300, nil, job3); err != nil { t.Fatalf("err: %v", err) } }) @@ -4725,7 +4927,7 @@ func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) { // Create a job and insert it job1 := mock.Job() time.AfterFunc(200*time.Millisecond, func() { - if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job1); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job1); err != nil { t.Fatalf("err: %v", err) } }) @@ -4818,7 +5020,7 @@ func TestJobEndpoint_ListJobs(t *testing.T) { // Create the register request job := mock.Job() state := s1.fsm.State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.NoError(t, err) // Lookup the jobs @@ -4883,7 +5085,7 @@ func TestJobEndpoint_ListJobs_AllNamespaces_OSS(t *testing.T) { // Create the register request job := mock.Job() state := s1.fsm.State() - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -4950,7 +5152,7 @@ func TestJobEndpoint_ListJobs_WithACL(t *testing.T) { // Create the register request job := mock.Job() - err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.Nil(err) req := &structs.JobListRequest{ @@ -5008,7 +5210,7 @@ func TestJobEndpoint_ListJobs_Blocking(t *testing.T) { // Upsert job triggers watches time.AfterFunc(100*time.Millisecond, func() { - if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job); err != nil { t.Fatalf("err: %v", err) } }) @@ -5103,7 +5305,7 @@ func TestJobEndpoint_ListJobs_PaginationFiltering(t *testing.T) { job.Namespace = m.namespace } job.CreateIndex = index - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, index, job)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, index, nil, job)) } aclToken := mock.CreatePolicyAndToken(t, state, 1100, "test-valid-read", @@ -5595,7 +5797,7 @@ func TestJobEndpoint_Deployments(t *testing.T) { d2 := mock.Deployment() d1.JobID = j.ID d2.JobID = j.ID - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, j), "UpsertJob") + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j), "UpsertJob") d1.JobCreateIndex = j.CreateIndex d2.JobCreateIndex = j.CreateIndex @@ -5632,7 +5834,7 @@ 
func TestJobEndpoint_Deployments_ACL(t *testing.T) { d2 := mock.Deployment() d1.JobID = j.ID d2.JobID = j.ID - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, j), "UpsertJob") + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j), "UpsertJob") d1.JobCreateIndex = j.CreateIndex d2.JobCreateIndex = j.CreateIndex require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") @@ -5695,7 +5897,7 @@ func TestJobEndpoint_Deployments_Blocking(t *testing.T) { d1 := mock.Deployment() d2 := mock.Deployment() d2.JobID = j.ID - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 50, j), "UpsertJob") + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 50, nil, j), "UpsertJob") d2.JobCreateIndex = j.CreateIndex // First upsert an unrelated eval time.AfterFunc(100*time.Millisecond, func() { @@ -5745,7 +5947,7 @@ func TestJobEndpoint_LatestDeployment(t *testing.T) { d2.JobID = j.ID d2.CreateIndex = d1.CreateIndex + 100 d2.ModifyIndex = d2.CreateIndex + 100 - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, j), "UpsertJob") + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j), "UpsertJob") d1.JobCreateIndex = j.CreateIndex d2.JobCreateIndex = j.CreateIndex require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") @@ -5784,7 +5986,7 @@ func TestJobEndpoint_LatestDeployment_ACL(t *testing.T) { d2.JobID = j.ID d2.CreateIndex = d1.CreateIndex + 100 d2.ModifyIndex = d2.CreateIndex + 100 - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, j), "UpsertJob") + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j), "UpsertJob") d1.JobCreateIndex = j.CreateIndex d2.JobCreateIndex = j.CreateIndex require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment") @@ -5850,7 +6052,7 @@ func TestJobEndpoint_LatestDeployment_Blocking(t *testing.T) { d1 := mock.Deployment() d2 := mock.Deployment() d2.JobID = j.ID - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 50, j), "UpsertJob") + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 50, nil, j), "UpsertJob") d2.JobCreateIndex = j.CreateIndex // First upsert an unrelated eval @@ -6443,7 +6645,7 @@ func TestJobEndpoint_Dispatch_ACL(t *testing.T) { // Create a parameterized job job := mock.BatchJob() job.ParameterizedJob = &structs.ParameterizedJobConfig{} - err := state.UpsertJob(structs.MsgTypeTestSetup, 400, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 400, nil, job) require.Nil(err) req := &structs.JobDispatchRequest{ @@ -6972,7 +7174,7 @@ func TestJobEndpoint_Dispatch_ACL_RejectedBySchedulerConfig(t *testing.T) { job := mock.BatchJob() job.ParameterizedJob = &structs.ParameterizedJobConfig{} - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.NoError(t, err) dispatch := &structs.JobDispatchRequest{ @@ -7062,7 +7264,7 @@ func TestJobEndpoint_Scale(t *testing.T) { job := mock.Job() originalCount := job.TaskGroups[0].Count - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.Nil(err) groupName := job.TaskGroups[0].Name @@ -7119,7 +7321,7 @@ func TestJobEndpoint_Scale_DeploymentBlocking(t *testing.T) { for _, tc := range cases { // create a job with a deployment history job := mock.Job() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, job), "UpsertJob") + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job), "UpsertJob") d1 := mock.Deployment() d1.Status = 
structs.DeploymentStatusCancelled d1.StatusDescription = structs.DeploymentStatusDescriptionNewerJob @@ -7198,7 +7400,7 @@ func TestJobEndpoint_Scale_InformationalEventsShouldNotBeBlocked(t *testing.T) { for _, tc := range cases { // create a job with a deployment history job := mock.Job() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, job), "UpsertJob") + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job), "UpsertJob") d1 := mock.Deployment() d1.Status = structs.DeploymentStatusCancelled d1.StatusDescription = structs.DeploymentStatusDescriptionNewerJob @@ -7264,7 +7466,7 @@ func TestJobEndpoint_Scale_ACL(t *testing.T) { state := s1.fsm.State() job := mock.Job() - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.Nil(err) scale := &structs.JobScaleRequest{ @@ -7347,7 +7549,7 @@ func TestJobEndpoint_Scale_ACL_RejectedBySchedulerConfig(t *testing.T) { state := s1.fsm.State() job := mock.Job() - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.NoError(t, err) scale := &structs.JobScaleRequest{ @@ -7469,7 +7671,7 @@ func TestJobEndpoint_Scale_Invalid(t *testing.T) { require.Contains(err.Error(), "not found") // register the job - err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.Nil(err) scale.Count = pointer.Of(int64(10)) @@ -7496,7 +7698,7 @@ func TestJobEndpoint_Scale_OutOfBounds(t *testing.T) { job.TaskGroups[0].Count = 5 // register the job - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.Nil(err) var resp structs.JobRegisterResponse @@ -7602,7 +7804,7 @@ func TestJobEndpoint_Scale_Priority(t *testing.T) { job := mock.Job() job.Priority = 90 originalCount := job.TaskGroups[0].Count - err := fsmState.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := fsmState.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) requireAssertion.Nil(err) groupName := job.TaskGroups[0].Name @@ -7649,7 +7851,7 @@ func TestJobEndpoint_InvalidCount(t *testing.T) { state := s1.fsm.State() job := mock.Job() - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.Nil(err) scale := &structs.JobScaleRequest{ @@ -7694,7 +7896,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { require.Nil(resp2.JobScaleStatus) // stopped (previous version) - require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1000, jobV1), "UpsertJob") + require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, jobV1), "UpsertJob") a0 := mock.Alloc() a0.Job = jobV1 a0.Namespace = jobV1.Namespace @@ -7703,7 +7905,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1010, []*structs.Allocation{a0}), "UpsertAllocs") jobV2 := jobV1.Copy() - require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1100, jobV2), "UpsertJob") + require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1100, nil, jobV2), "UpsertJob") a1 := mock.Alloc() a1.Job = jobV2 a1.Namespace = jobV2.Namespace @@ -7796,7 +7998,7 @@ func TestJobEndpoint_GetScaleStatus_ACL(t *testing.T) { // Create the job job := mock.Job() - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) 
require.Nil(err) // Get the job scale status @@ -7879,7 +8081,7 @@ func TestJob_GetServiceRegistrations(t *testing.T) { correctSetupFn := func(s *Server) (error, string, *structs.ServiceRegistration) { // Generate an upsert a job. job := mock.Job() - err := s.State().UpsertJob(structs.MsgTypeTestSetup, 10, job) + err := s.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, job) if err != nil { return nil, "", nil } @@ -7967,7 +8169,7 @@ func TestJob_GetServiceRegistrations(t *testing.T) { // Generate an upsert a job. job := mock.Job() - require.NoError(t, s.State().UpsertJob(structs.MsgTypeTestSetup, 10, job)) + require.NoError(t, s.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, job)) // Perform a lookup and test the response. serviceRegReq := &structs.JobServiceRegistrationsRequest{ diff --git a/nomad/mock/mock.go b/nomad/mock/mock.go index a35c546de..21175ea60 100644 --- a/nomad/mock/mock.go +++ b/nomad/mock/mock.go @@ -43,6 +43,50 @@ func HCL() string { ` } +// HCLVar returns a the HCL of job that requires a HCL variables S, N, +// and B to be set. Also returns the content of a vars-file to satisfy +// those variables. +func HCLVar() (string, string) { + return ` +variable "S" { + type = string +} + +variable "N" { + type = number +} + +variable "B" { + type = bool +} + +job "var-job" { + type = "batch" + constraint { + attribute = "${attr.kernel.name}" + value = "linux" + } + group "group" { + task "task" { + driver = "raw_exec" + config { + command = "echo" + args = ["S is ${var.S}, N is ${var.N}, B is ${var.B}"] + } + resources { + cpu = 10 + memory = 32 + } + } + } +} +`, ` +S = "stringy" +N = 42 +B = true +` +} + func Eval() *structs.Evaluation { now := time.Now().UTC().UnixNano() eval := &structs.Evaluation{ diff --git a/nomad/namespace_endpoint_test.go b/nomad/namespace_endpoint_test.go index be380b672..a0578b513 100644 --- a/nomad/namespace_endpoint_test.go +++ b/nomad/namespace_endpoint_test.go @@ -502,7 +502,7 @@ func TestNamespaceEndpoint_DeleteNamespaces_NonTerminal_Local(t *testing.T) { // Create a job in one j := mock.Job() j.Namespace = ns1.Name - assert.Nil(s1.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 1001, j)) + assert.Nil(s1.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 1001, nil, j)) // Lookup the namespaces req := &structs.NamespaceDeleteRequest{ @@ -553,7 +553,7 @@ func TestNamespaceEndpoint_DeleteNamespaces_NonTerminal_Federated_ACL(t *testing // Create a job in the namespace on the non-authority j := mock.Job() j.Namespace = ns1.Name - assert.Nil(s2.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 1001, j)) + assert.Nil(s2.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 1001, nil, j)) // Delete the namespaces without the correct permissions req := &structs.NamespaceDeleteRequest{ diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index 4a712da2b..a3c5b203c 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -778,7 +778,7 @@ func TestClientEndpoint_Register_GetEvals(t *testing.T) { // Register a system job. job := mock.SystemJob() state := s1.fsm.State() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, job); err != nil { t.Fatalf("err: %v", err) } @@ -869,7 +869,7 @@ func TestClientEndpoint_UpdateStatus_GetEvals(t *testing.T) { // Register a system job. 
job := mock.SystemJob() state := s1.fsm.State() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, job); err != nil { t.Fatalf("err: %v", err) } @@ -1191,7 +1191,7 @@ func TestClientEndpoint_UpdateDrain(t *testing.T) { // Register a system job job := mock.SystemJob() - require.Nil(s1.State().UpsertJob(structs.MsgTypeTestSetup, 10, job)) + require.Nil(s1.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, job)) // Update the eligibility and expect evals dereg.DrainStrategy = nil @@ -1655,7 +1655,7 @@ func TestClientEndpoint_UpdateEligibility(t *testing.T) { // Register a system job job := mock.SystemJob() - require.Nil(s1.State().UpsertJob(structs.MsgTypeTestSetup, 10, job)) + require.Nil(s1.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, job)) // Update the eligibility and expect evals elig.Eligibility = structs.NodeSchedulingEligible @@ -2652,7 +2652,7 @@ func TestClientEndpoint_UpdateAlloc(t *testing.T) { // Inject mock job job := mock.Job() job.ID = "mytestjob" - err := state.UpsertJob(structs.MsgTypeTestSetup, 101, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, job) require.Nil(err) // Inject fake allocations @@ -2740,7 +2740,7 @@ func TestClientEndpoint_UpdateAlloc_NodeNotReady(t *testing.T) { state := s1.fsm.State() job := mock.Job() - err = state.UpsertJob(structs.MsgTypeTestSetup, 101, job) + err = state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, job) require.NoError(t, err) alloc := mock.Alloc() @@ -2897,7 +2897,7 @@ func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) { // Inject mock job job := mock.Job() job.ID = alloc.JobID - err := state.UpsertJob(structs.MsgTypeTestSetup, 101, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -2964,7 +2964,7 @@ func TestClientEndpoint_CreateNodeEvals(t *testing.T) { // Inject a fake system job. job := mock.SystemJob() - if err := state.UpsertJob(structs.MsgTypeTestSetup, idx, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, idx, nil, job); err != nil { t.Fatalf("err: %v", err) } idx++ @@ -3056,14 +3056,14 @@ func TestClientEndpoint_CreateNodeEvals_MultipleNSes(t *testing.T) { // Inject a fake system job. 
defaultJob := mock.SystemJob() - err = state.UpsertJob(structs.MsgTypeTestSetup, idx, defaultJob) + err = state.UpsertJob(structs.MsgTypeTestSetup, idx, nil, defaultJob) require.NoError(t, err) idx++ nsJob := mock.SystemJob() nsJob.ID = defaultJob.ID nsJob.Namespace = ns1.Name - err = state.UpsertJob(structs.MsgTypeTestSetup, idx, nsJob) + err = state.UpsertJob(structs.MsgTypeTestSetup, idx, nil, nsJob) require.NoError(t, err) idx++ @@ -3117,14 +3117,14 @@ func TestClientEndpoint_CreateNodeEvals_MultipleDCes(t *testing.T) { // Inject a fake system job in the same dc defaultJob := mock.SystemJob() defaultJob.Datacenters = []string{"test1", "test2"} - err = state.UpsertJob(structs.MsgTypeTestSetup, idx, defaultJob) + err = state.UpsertJob(structs.MsgTypeTestSetup, idx, nil, defaultJob) require.NoError(t, err) idx++ // Inject a fake system job in a different dc nsJob := mock.SystemJob() nsJob.Datacenters = []string{"test2", "test3"} - err = state.UpsertJob(structs.MsgTypeTestSetup, idx, nsJob) + err = state.UpsertJob(structs.MsgTypeTestSetup, idx, nil, nsJob) require.NoError(t, err) idx++ @@ -4201,7 +4201,7 @@ func TestClientEndpoint_UpdateAlloc_Evals_ByTrigger(t *testing.T) { job.ID = tc.name + "-test-job" if !tc.missingJob { - err = fsmState.UpsertJob(structs.MsgTypeTestSetup, 101, job) + err = fsmState.UpsertJob(structs.MsgTypeTestSetup, 101, nil, job) require.NoError(t, err) } diff --git a/nomad/operator_endpoint_test.go b/nomad/operator_endpoint_test.go index 53bccbf40..517feac10 100644 --- a/nomad/operator_endpoint_test.go +++ b/nomad/operator_endpoint_test.go @@ -759,7 +759,7 @@ func generateSnapshot(t *testing.T) (*snapshot.Snapshot, *structs.Job) { err := msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq, &jobResp) require.NoError(t, err) - err = s.State().UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err = s.State().UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.NoError(t, err) snapshot, err := snapshot.New(s.logger, s.raft) diff --git a/nomad/periodic_endpoint_test.go b/nomad/periodic_endpoint_test.go index 3363e7891..e2a04eb29 100644 --- a/nomad/periodic_endpoint_test.go +++ b/nomad/periodic_endpoint_test.go @@ -30,7 +30,7 @@ func TestPeriodicEndpoint_Force(t *testing.T) { // Create and insert a periodic job. job := mock.PeriodicJob() job.Periodic.ProhibitOverlap = true // Shouldn't affect anything. - if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job); err != nil { t.Fatalf("err: %v", err) } s1.periodicDispatcher.Add(job) @@ -82,7 +82,7 @@ func TestPeriodicEndpoint_Force_ACL(t *testing.T) { // Create and insert a periodic job. job := mock.PeriodicJob() job.Periodic.ProhibitOverlap = true // Shouldn't affect anything. - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 100, job)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job)) err := s1.periodicDispatcher.Add(job) assert.Nil(err) @@ -179,7 +179,7 @@ func TestPeriodicEndpoint_Force_NonPeriodic(t *testing.T) { // Create and insert a non-periodic job. 
job := mock.Job() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job); err != nil { t.Fatalf("err: %v", err) } diff --git a/nomad/periodic_test.go b/nomad/periodic_test.go index eb047514d..02e729c4f 100644 --- a/nomad/periodic_test.go +++ b/nomad/periodic_test.go @@ -675,7 +675,7 @@ func TestPeriodicDispatch_RunningChildren_NoEvals(t *testing.T) { // Insert job. state := s1.fsm.State() job := mock.PeriodicJob() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { t.Fatalf("UpsertJob failed: %v", err) } @@ -699,12 +699,12 @@ func TestPeriodicDispatch_RunningChildren_ActiveEvals(t *testing.T) { // Insert periodic job and child. state := s1.fsm.State() job := mock.PeriodicJob() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { t.Fatalf("UpsertJob failed: %v", err) } childjob := deriveChildJob(job) - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1001, childjob); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, childjob); err != nil { t.Fatalf("UpsertJob failed: %v", err) } @@ -736,12 +736,12 @@ func TestPeriodicDispatch_RunningChildren_ActiveAllocs(t *testing.T) { // Insert periodic job and child. state := s1.fsm.State() job := mock.PeriodicJob() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { t.Fatalf("UpsertJob failed: %v", err) } childjob := deriveChildJob(job) - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1001, childjob); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, childjob); err != nil { t.Fatalf("UpsertJob failed: %v", err) } diff --git a/nomad/scaling_endpoint_test.go b/nomad/scaling_endpoint_test.go index 859ac0297..7c747a6ae 100644 --- a/nomad/scaling_endpoint_test.go +++ b/nomad/scaling_endpoint_test.go @@ -173,8 +173,8 @@ func TestScalingEndpoint_ListPolicies(t *testing.T) { j2polH.Type = "horizontal" j2polH.TargetTaskGroup(j2, j2.TaskGroups[0]) - s1.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 1000, j1) - s1.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 1000, j2) + s1.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j1) + s1.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j2) pols := []*structs.ScalingPolicy{j1polV, j1polH, j2polH} s1.fsm.State().UpsertScalingPolicies(1000, pols) diff --git a/nomad/search_endpoint_test.go b/nomad/search_endpoint_test.go index bc6673e98..59cecdde4 100644 --- a/nomad/search_endpoint_test.go +++ b/nomad/search_endpoint_test.go @@ -31,7 +31,7 @@ func registerMockJob(s *Server, t *testing.T, prefix string, counter int) *struc func registerJob(s *Server, t *testing.T, job *structs.Job) { fsmState := s.fsm.State() - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, job)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) } func mockAlloc() *structs.Allocation { @@ -806,11 +806,11 @@ func TestSearch_PrefixSearch_Namespace_ACL(t *testing.T) { require.NoError(t, fsmState.UpsertNamespaces(500, []*structs.Namespace{ns})) job1 := mock.Job() - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 502, job1)) + require.NoError(t, 
fsmState.UpsertJob(structs.MsgTypeTestSetup, 502, nil, job1)) job2 := mock.Job() job2.Namespace = ns.Name - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 504, job2)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 504, nil, job2)) require.NoError(t, fsmState.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node())) @@ -931,7 +931,7 @@ func TestSearch_PrefixSearch_ScalingPolicy(t *testing.T) { prefix := policy.ID fsmState := s.fsm.State() - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, job)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) req := &structs.SearchRequest{ Prefix: prefix, @@ -1450,7 +1450,7 @@ func TestSearch_FuzzySearch_ScalingPolicy(t *testing.T) { job, policy := mock.JobWithScalingPolicy() fsmState := s.fsm.State() - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, job)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) req := &structs.FuzzySearchRequest{ Text: policy.ID[0:3], // scaling policies are prefix searched @@ -1491,11 +1491,11 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { require.NoError(t, fsmState.UpsertNamespaces(500, []*structs.Namespace{ns})) job1 := mock.Job() - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 502, job1)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 502, nil, job1)) job2 := mock.Job() job2.Namespace = ns.Name - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 504, job2)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, 504, nil, job2)) node := mock.Node() node.Name = "run-jobs" @@ -1629,19 +1629,19 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { job1.Name = "teamA-job1" job1.ID = "job1" job1.Namespace = "teamA" - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), job1)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job1)) job2 := mock.Job() job2.Name = "teamB-job2" job2.ID = "job2" job2.Namespace = "teamB" - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), job2)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job2)) job3 := mock.Job() job3.Name = "teamC-job3" job3.ID = "job3" job3.Namespace = "teamC" - require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), job3)) + require.NoError(t, fsmState.UpsertJob(structs.MsgTypeTestSetup, inc(), nil, job3)) // Upsert a node node := mock.Node() diff --git a/nomad/service_registration_endpoint_test.go b/nomad/service_registration_endpoint_test.go index 4aea7c07b..f8bc8f6d3 100644 --- a/nomad/service_registration_endpoint_test.go +++ b/nomad/service_registration_endpoint_test.go @@ -887,7 +887,7 @@ func TestServiceRegistration_List(t *testing.T) { job := allocs[0].Job job.Namespace = "platform" allocs[0].Namespace = "platform" - require.NoError(t, s.State().UpsertJob(structs.MsgTypeTestSetup, 10, job)) + require.NoError(t, s.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, job)) s.signAllocIdentities(job, allocs) require.NoError(t, s.State().UpsertAllocs(structs.MsgTypeTestSetup, 15, allocs)) @@ -1174,7 +1174,7 @@ func TestServiceRegistration_GetService(t *testing.T) { // Generate an allocation with a signed identity allocs := []*structs.Allocation{mock.Alloc()} job := allocs[0].Job - require.NoError(t, s.State().UpsertJob(structs.MsgTypeTestSetup, 10, job)) + require.NoError(t, s.State().UpsertJob(structs.MsgTypeTestSetup, 
10, nil, job)) s.signAllocIdentities(job, allocs) require.NoError(t, s.State().UpsertAllocs(structs.MsgTypeTestSetup, 15, allocs)) diff --git a/nomad/state/deployment_events_test.go b/nomad/state/deployment_events_test.go index 3015ff96d..ac73107d4 100644 --- a/nomad/state/deployment_events_test.go +++ b/nomad/state/deployment_events_test.go @@ -30,7 +30,7 @@ func TestDeploymentEventFromChanges(t *testing.T) { d := mock.Deployment() d.JobID = j.ID - require.NoError(t, s.upsertJobImpl(10, j, false, setupTx)) + require.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx)) require.NoError(t, s.upsertDeploymentImpl(10, d, setupTx)) setupTx.Txn.Commit() diff --git a/nomad/state/events_test.go b/nomad/state/events_test.go index bcbffc3cf..c9d88322d 100644 --- a/nomad/state/events_test.go +++ b/nomad/state/events_test.go @@ -114,7 +114,7 @@ func TestEventsFromChanges_DeploymentUpdate(t *testing.T) { d := mock.Deployment() d.JobID = j.ID - require.NoError(t, s.upsertJobImpl(10, j, false, setupTx)) + require.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx)) require.NoError(t, s.upsertDeploymentImpl(10, d, setupTx)) setupTx.Txn.Commit() @@ -158,7 +158,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { tg2 := tg1.Copy() tg2.Name = "foo" j.TaskGroups = append(j.TaskGroups, tg2) - require.NoError(t, s.upsertJobImpl(10, j, false, setupTx)) + require.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx)) d := mock.Deployment() d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion @@ -235,7 +235,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { tg2 := tg1.Copy() tg2.Name = "foo" j.TaskGroups = append(j.TaskGroups, tg2) - require.NoError(t, s.upsertJobImpl(10, j, false, setupTx)) + require.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx)) d := mock.Deployment() d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion @@ -440,7 +440,7 @@ func TestEventsFromChanges_ApplyPlanResultsRequestType(t *testing.T) { alloc.DeploymentID = d.ID alloc2.DeploymentID = d.ID - require.NoError(t, s.UpsertJob(structs.MsgTypeTestSetup, 9, job)) + require.NoError(t, s.UpsertJob(structs.MsgTypeTestSetup, 9, nil, job)) eval := mock.Eval() eval.JobID = job.ID @@ -587,7 +587,7 @@ func TestEventsFromChanges_AllocUpdateDesiredTransitionRequestType(t *testing.T) alloc := mock.Alloc() - require.Nil(t, s.UpsertJob(structs.MsgTypeTestSetup, 10, alloc.Job)) + require.Nil(t, s.UpsertJob(structs.MsgTypeTestSetup, 10, nil, alloc.Job)) require.Nil(t, s.UpsertAllocs(structs.MsgTypeTestSetup, 11, []*structs.Allocation{alloc})) msgType := structs.AllocUpdateDesiredTransitionRequestType diff --git a/nomad/state/schema.go b/nomad/state/schema.go index 55bf6477a..d3f856752 100644 --- a/nomad/state/schema.go +++ b/nomad/state/schema.go @@ -7,7 +7,7 @@ import ( "fmt" "sync" - memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/nomad/state/indexer" "github.com/hashicorp/nomad/nomad/structs" ) @@ -69,6 +69,7 @@ func init() { jobTableSchema, jobSummarySchema, jobVersionSchema, + jobSubmissionSchema, deploymentSchema, periodicLaunchTableSchema, evalTableSchema, @@ -280,6 +281,52 @@ func jobVersionSchema() *memdb.TableSchema { } } +// jobSubmissionSchema returns the memdb table schema of job submissions +// which contain the original source material of each job, per version. +// Unique index by Namespace, JobID, and Version. 
+func jobSubmissionSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: "job_submission", + Indexes: map[string]*memdb.IndexSchema{ + "id": { + Name: "id", + AllowMissing: false, + Unique: true, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "Namespace", + }, + &memdb.StringFieldIndex{ + Field: "JobID", + Lowercase: true, + }, + &memdb.UintFieldIndex{ + Field: "Version", + }, + }, + }, + }, + "by_jobID": { + Name: "by_jobID", + AllowMissing: false, + Unique: false, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "Namespace", + }, + &memdb.StringFieldIndex{ + Field: "JobID", + Lowercase: true, + }, + }, + }, + }, + }, + } +} + // jobIsGCable satisfies the ConditionalIndexFunc interface and creates an index // on whether a job is eligible for garbage collection. func jobIsGCable(obj interface{}) (bool, error) { diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index e5e1958d5..0549ea372 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -18,8 +18,10 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-set" "github.com/hashicorp/nomad/helper/pointer" + "github.com/hashicorp/nomad/lib/lang" "github.com/hashicorp/nomad/nomad/stream" "github.com/hashicorp/nomad/nomad/structs" + "golang.org/x/exp/slices" ) // Txn is a transaction against a state store. @@ -1634,10 +1636,10 @@ func (s *StateStore) Nodes(ws memdb.WatchSet) (memdb.ResultIterator, error) { } // UpsertJob is used to register a job or update a job definition -func (s *StateStore) UpsertJob(msgType structs.MessageType, index uint64, job *structs.Job) error { +func (s *StateStore) UpsertJob(msgType structs.MessageType, index uint64, sub *structs.JobSubmission, job *structs.Job) error { txn := s.db.WriteTxnMsgT(msgType, index) defer txn.Abort() - if err := s.upsertJobImpl(index, job, false, txn); err != nil { + if err := s.upsertJobImpl(index, sub, job, false, txn); err != nil { return err } return txn.Commit() @@ -1645,12 +1647,12 @@ func (s *StateStore) UpsertJob(msgType structs.MessageType, index uint64, job *s // UpsertJobTxn is used to register a job or update a job definition, like UpsertJob, // but in a transaction. 
Useful for when making multiple modifications atomically -func (s *StateStore) UpsertJobTxn(index uint64, job *structs.Job, txn Txn) error { - return s.upsertJobImpl(index, job, false, txn) +func (s *StateStore) UpsertJobTxn(index uint64, sub *structs.JobSubmission, job *structs.Job, txn Txn) error { + return s.upsertJobImpl(index, sub, job, false, txn) } // upsertJobImpl is the implementation for registering a job or updating a job definition -func (s *StateStore) upsertJobImpl(index uint64, job *structs.Job, keepVersion bool, txn *txn) error { +func (s *StateStore) upsertJobImpl(index uint64, sub *structs.JobSubmission, job *structs.Job, keepVersion bool, txn *txn) error { // Assert the namespace exists if exists, err := s.namespaceExists(txn, job.Namespace); err != nil { return err @@ -1727,6 +1729,10 @@ func (s *StateStore) upsertJobImpl(index uint64, job *structs.Job, keepVersion b return fmt.Errorf("unable to update job csi plugins: %v", err) } + if err := s.updateJobSubmission(index, sub, job.Namespace, job.ID, job.Version, txn); err != nil { + return fmt.Errorf("unable to update job submission: %v", err) + } + // Insert the job if err := txn.Insert("jobs", job); err != nil { return fmt.Errorf("job insert failed: %v", err) @@ -1835,6 +1841,11 @@ func (s *StateStore) DeleteJobTxn(index uint64, namespace, jobID string, txn Txn return fmt.Errorf("index update failed: %v", err) } + // Delete the job submission + if err := s.deleteJobSubmission(job, txn); err != nil { + return fmt.Errorf("deleting job submission failed: %v", err) + } + // Delete any remaining job scaling policies if err := s.deleteJobScalingPolicies(index, job, txn); err != nil { return fmt.Errorf("deleting job scaling policies failed: %v", err) @@ -1889,6 +1900,11 @@ func (s *StateStore) deleteJobScalingPolicies(index uint64, job *structs.Job, tx return nil } +func (s *StateStore) deleteJobSubmission(job *structs.Job, txn *txn) error { + _, err := txn.DeleteAll("job_submission", "by_jobID", job.Namespace, job.ID) + return err +} + // deleteJobVersions deletes all versions of the given job. func (s *StateStore) deleteJobVersions(index uint64, job *structs.Job, txn *txn) error { iter, err := txn.Get("job_version", "id_prefix", job.Namespace, job.ID) @@ -1977,6 +1993,27 @@ func (s *StateStore) upsertJobVersion(index uint64, job *structs.Job, txn *txn) return nil } +// JobSubmission returns the original HCL/Variables context of a job, if available. +// +// Note: it is a normal case for the submission context to be unavailable, in which case +// nil is returned with no error. +func (s *StateStore) JobSubmission(ws memdb.WatchSet, namespace, jobName string, version uint64) (*structs.JobSubmission, error) { + txn := s.db.ReadTxn() + return s.jobSubmission(ws, namespace, jobName, version, txn) +} + +func (s *StateStore) jobSubmission(ws memdb.WatchSet, namespace, jobName string, version uint64, txn Txn) (*structs.JobSubmission, error) { + watchCh, existing, err := txn.FirstWatch("job_submission", "id", namespace, jobName, version) + if err != nil { + return nil, fmt.Errorf("job submission lookup failed: %v", err) + } + ws.Add(watchCh) + if existing != nil { + return existing.(*structs.JobSubmission), nil + } + return nil, nil +} + // JobByID is used to lookup a job by its ID. JobByID returns the current/latest job // version. 
 func (s *StateStore) JobByID(ws memdb.WatchSet, namespace, id string) (*structs.Job, error) {
@@ -4493,7 +4530,7 @@ func (s *StateStore) UpdateDeploymentStatus(msgType structs.MessageType, index u
 
 	// Upsert the job if necessary
 	if req.Job != nil {
-		if err := s.upsertJobImpl(index, req.Job, false, txn); err != nil {
+		if err := s.upsertJobImpl(index, nil, req.Job, false, txn); err != nil {
 			return err
 		}
 	}
@@ -4580,7 +4617,7 @@ func (s *StateStore) updateJobStabilityImpl(index uint64, namespace, jobID strin
 
 	copy := job.Copy()
 	copy.Stable = stable
-	return s.upsertJobImpl(index, copy, true, txn)
+	return s.upsertJobImpl(index, nil, copy, true, txn)
 }
 
 // UpdateDeploymentPromotion is used to promote canaries in a deployment and
@@ -4809,7 +4846,7 @@ func (s *StateStore) UpdateDeploymentAllocHealth(msgType structs.MessageType, in
 
 	// Upsert the job if necessary
 	if req.Job != nil {
-		if err := s.upsertJobImpl(index, req.Job, false, txn); err != nil {
+		if err := s.upsertJobImpl(index, nil, req.Job, false, txn); err != nil {
 			return err
 		}
 	}
@@ -5318,6 +5355,74 @@ func (s *StateStore) updateJobScalingPolicies(index uint64, job *structs.Job, tx
 	return nil
 }
 
+// updateJobSubmission stores the original job source and variables that the
+// job structure originates from. It is up to the job submitter to include the
+// source material, and as such sub may be nil, in which case nothing is stored.
+func (s *StateStore) updateJobSubmission(index uint64, sub *structs.JobSubmission, namespace, jobID string, version uint64, txn *txn) error {
+	switch {
+	case sub == nil:
+		return nil
+	case namespace == "":
+		return errors.New("job_submission requires a namespace")
+	case jobID == "":
+		return errors.New("job_submission requires a jobID")
+	default:
+		sub.Namespace = namespace
+		sub.JobID = jobID
+		sub.JobModifyIndex = index
+		sub.Version = version
+	}
+
+	// insert the job submission
+	if err := txn.Insert("job_submission", sub); err != nil {
+		return err
+	}
+
+	// prune old job submissions
+	return s.pruneJobSubmissions(namespace, jobID, txn)
+}
+
+func (s *StateStore) pruneJobSubmissions(namespace, jobID string, txn *txn) error {
+	// although the number of tracked submissions is the same as the number of
+	// tracked job versions, do not assume a 1:1 correlation, as there could be
+	// holes in the submissions (or none at all)
+	limit := structs.JobTrackedVersions
+
+	iter, err := txn.Get("job_submission", "by_jobID", namespace, jobID)
+	if err != nil {
+		return err
+	}
+
+	// lookup each stored submission's (modify index, version)
+	stored := make([]lang.Pair[uint64, uint64], 0, limit+1)
+	for next := iter.Next(); next != nil; next = iter.Next() {
+		sub := next.(*structs.JobSubmission)
+		stored = append(stored, lang.Pair[uint64, uint64]{First: sub.JobModifyIndex, Second: sub.Version})
+	}
+
+	// if we are still at or below the limit, nothing to do
+	if len(stored) <= limit {
+		return nil
+	}
+
+	// sort by job modify index descending so we can just keep the first N
+	slices.SortFunc(stored, func(a, b lang.Pair[uint64, uint64]) bool {
+		return a.First > b.First
+	})
+
+	// remove the outdated submission versions
+	for _, sub := range stored[limit:] {
+		if err = txn.Delete("job_submission", &structs.JobSubmission{
+			Namespace: namespace,
+			JobID:     jobID,
+			Version:   sub.Second,
+		}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // updateJobCSIPlugins runs on job update, and indexes the job in the plugin
 func (s *StateStore) updateJobCSIPlugins(index uint64, job, prev *structs.Job, txn *txn) error
{ plugIns := make(map[string]*structs.CSIPlugin) diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 8193cc510..5afa1e61c 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -8,6 +8,7 @@ import ( "fmt" "reflect" "sort" + "strconv" "strings" "testing" "time" @@ -110,7 +111,7 @@ func TestStateStore_UpsertPlanResults_AllocationsCreated_Denormalized(t *testing job := alloc.Job alloc.Job = nil - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job); err != nil { t.Fatalf("err: %v", err) } @@ -181,13 +182,13 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) { require := require.New(t) require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 900, []*structs.Allocation{stoppedAlloc, preemptedAlloc})) - require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 999, job)) + require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job)) // modify job and ensure that stopped and preempted alloc point to original Job mJob := job.Copy() mJob.TaskGroups[0].Name = "other" - require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1001, mJob)) + require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, mJob)) eval := mock.Eval() eval.JobID = job.ID @@ -266,7 +267,7 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { alloc.DeploymentID = d.ID alloc2.DeploymentID = d.ID - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job); err != nil { t.Fatalf("err: %v", err) } @@ -369,7 +370,7 @@ func TestStateStore_UpsertPlanResults_PreemptedAllocs(t *testing.T) { alloc.Job = nil // Insert job - err := state.UpsertJob(structs.MsgTypeTestSetup, 999, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job) require.NoError(err) // Create an eval @@ -446,7 +447,7 @@ func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { // Create a job that applies to all job := mock.Job() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, job); err != nil { t.Fatalf("err: %v", err) } @@ -574,7 +575,7 @@ func TestStateStore_OldDeployment(t *testing.T) { state := testStateStore(t) job := mock.Job() job.ID = "job1" - state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) deploy1 := mock.Deployment() deploy1.JobID = job.ID @@ -990,7 +991,7 @@ func TestStateStore_DeleteNamespaces_NonTerminalJobs(t *testing.T) { job := mock.Job() job.Namespace = ns.Name - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, job)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job)) // Create a watchset so we can test that delete fires the watch ws := memdb.NewWatchSet() @@ -1909,7 +1910,7 @@ func TestStateStore_UpsertJob_Job(t *testing.T) { t.Fatalf("bad: %v", err) } - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { t.Fatalf("err: %v", err) } if !watchFired(ws) { @@ -1989,14 +1990,14 @@ func TestStateStore_UpdateUpsertJob_Job(t *testing.T) { t.Fatalf("bad: %v", err) } - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err 
!= nil { t.Fatalf("err: %v", err) } job2 := mock.Job() job2.ID = job.ID job2.AllAtOnce = true - err = state.UpsertJob(structs.MsgTypeTestSetup, 1001, job2) + err = state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2) if err != nil { t.Fatalf("err: %v", err) } @@ -2094,7 +2095,7 @@ func TestStateStore_UpdateUpsertJob_PeriodicJob(t *testing.T) { t.Fatalf("bad: %v", err) } - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { t.Fatalf("err: %v", err) } @@ -2102,7 +2103,7 @@ func TestStateStore_UpdateUpsertJob_PeriodicJob(t *testing.T) { job2 := job.Copy() job2.Periodic = nil job2.ID = fmt.Sprintf("%v/%s-1490635020", job.ID, structs.PeriodicLaunchSuffix) - err = state.UpsertJob(structs.MsgTypeTestSetup, 1001, job2) + err = state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2) if err != nil { t.Fatalf("err: %v", err) } @@ -2116,7 +2117,7 @@ func TestStateStore_UpdateUpsertJob_PeriodicJob(t *testing.T) { job3 := job.Copy() job3.TaskGroups[0].Tasks[0].Name = "new name" - err = state.UpsertJob(structs.MsgTypeTestSetup, 1003, job3) + err = state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, job3) if err != nil { t.Fatalf("err: %v", err) } @@ -2145,7 +2146,7 @@ func TestStateStore_UpsertJob_BadNamespace(t *testing.T) { job := mock.Job() job.Namespace = "foo" - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) assert.Contains(err.Error(), "nonexistent namespace") ws := memdb.NewWatchSet() @@ -2169,14 +2170,14 @@ func TestStateStore_UpsertJob_ChildJob(t *testing.T) { t.Fatalf("bad: %v", err) } - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, parent); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, parent); err != nil { t.Fatalf("err: %v", err) } child := mock.Job() child.Status = "" child.ParentID = parent.ID - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1001, child); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, child); err != nil { t.Fatalf("err: %v", err) } @@ -2201,6 +2202,70 @@ func TestStateStore_UpsertJob_ChildJob(t *testing.T) { } } +func TestStateStore_UpsertJob_submission(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + + job := mock.Job() + job.Meta = map[string]string{"version": "1"} + submission := &structs.JobSubmission{ + Source: "source", + Version: 0, + } + + index := uint64(1000) + + // initially non-existent + sub, err := state.JobSubmission(nil, job.Namespace, job.ID, 0) + must.NoError(t, err) + must.Nil(t, sub) + + // insert first one, version 0, index 1001 + index++ + err = state.UpsertJob(structs.JobRegisterRequestType, index, submission, job) + must.NoError(t, err) + + // query first one, version 0 + sub, err = state.JobSubmission(nil, job.Namespace, job.ID, 0) + must.NoError(t, err) + must.NotNil(t, sub) + must.Eq(t, 0, sub.Version) + must.Eq(t, index, sub.JobModifyIndex) + + // insert 6 more, going over the limit + for i := 1; i <= structs.JobTrackedVersions; i++ { + index++ + job2 := job.Copy() + job2.Meta["version"] = strconv.Itoa(i) + sub2 := &structs.JobSubmission{ + Source: "source", + Version: uint64(i), + } + err = state.UpsertJob(structs.JobRegisterRequestType, index, sub2, job2) + must.NoError(t, err) + } + + // the version 0 submission is now dropped + sub, err = state.JobSubmission(nil, job.Namespace, job.ID, 0) + must.NoError(t, err) + must.Nil(t, sub) + + // but we do have 
version 1 + sub, err = state.JobSubmission(nil, job.Namespace, job.ID, 1) + must.NoError(t, err) + must.NotNil(t, sub) + must.Eq(t, 1, sub.Version) + must.Eq(t, 1002, sub.JobModifyIndex) + + // and up to version 6 + sub, err = state.JobSubmission(nil, job.Namespace, job.ID, 6) + must.NoError(t, err) + must.NotNil(t, sub) + must.Eq(t, 6, sub.Version) + must.Eq(t, 1007, sub.JobModifyIndex) +} + func TestStateStore_UpdateUpsertJob_JobVersion(t *testing.T) { ci.Parallel(t) @@ -2218,7 +2283,7 @@ func TestStateStore_UpdateUpsertJob_JobVersion(t *testing.T) { t.Fatalf("bad: %v", err) } - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { t.Fatalf("err: %v", err) } @@ -2231,7 +2296,7 @@ func TestStateStore_UpdateUpsertJob_JobVersion(t *testing.T) { finalJob = mock.Job() finalJob.ID = job.ID finalJob.Name = fmt.Sprintf("%d", i) - err = state.UpsertJob(structs.MsgTypeTestSetup, uint64(1000+i), finalJob) + err = state.UpsertJob(structs.MsgTypeTestSetup, uint64(1000+i), nil, finalJob) if err != nil { t.Fatalf("err: %v", err) } @@ -2298,7 +2363,7 @@ func TestStateStore_DeleteJob_Job(t *testing.T) { state := testStateStore(t) job := mock.Job() - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -2388,7 +2453,7 @@ func TestStateStore_DeleteJobTxn_BatchDeletes(t *testing.T) { stateIndex++ job := mock.BatchJob() - err := state.UpsertJob(structs.MsgTypeTestSetup, stateIndex, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, stateIndex, nil, job) require.NoError(t, err) jobs[i] = job @@ -2402,7 +2467,7 @@ func TestStateStore_DeleteJobTxn_BatchDeletes(t *testing.T) { "Version": fmt.Sprintf("%d", vi), } - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, stateIndex, job)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, stateIndex, nil, job)) } } @@ -2459,7 +2524,7 @@ func TestStateStore_DeleteJob_MultipleVersions(t *testing.T) { ws := memdb.NewWatchSet() _, err := state.JobVersionsByID(ws, job.Namespace, job.ID) assert.Nil(err) - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, job)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) assert.True(watchFired(ws)) var finalJob *structs.Job @@ -2467,7 +2532,7 @@ func TestStateStore_DeleteJob_MultipleVersions(t *testing.T) { finalJob = mock.Job() finalJob.ID = job.ID finalJob.Priority = i - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, uint64(1000+i), finalJob)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, uint64(1000+i), nil, finalJob)) } assert.Nil(state.DeleteJob(1020, job.Namespace, job.ID)) @@ -2507,7 +2572,7 @@ func TestStateStore_DeleteJob_ChildJob(t *testing.T) { state := testStateStore(t) parent := mock.Job() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, parent); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent); err != nil { t.Fatalf("err: %v", err) } @@ -2515,7 +2580,7 @@ func TestStateStore_DeleteJob_ChildJob(t *testing.T) { child.Status = "" child.ParentID = parent.ID - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, child); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child); err != nil { t.Fatalf("err: %v", err) } @@ -2565,7 +2630,7 @@ func TestStateStore_Jobs(t *testing.T) { job := mock.Job() jobs = append(jobs, job) - err := 
state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -2607,7 +2672,7 @@ func TestStateStore_JobVersions(t *testing.T) { job := mock.Job() jobs = append(jobs, job) - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -2646,7 +2711,7 @@ func TestStateStore_JobsByIDPrefix(t *testing.T) { job := mock.Job() job.ID = "redis" - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -2689,7 +2754,7 @@ func TestStateStore_JobsByIDPrefix(t *testing.T) { job = mock.Job() job.ID = "riak" - err = state.UpsertJob(structs.MsgTypeTestSetup, 1001, job) + err = state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -2742,8 +2807,8 @@ func TestStateStore_JobsByIDPrefix_Namespaces(t *testing.T) { job2.Namespace = ns2.Name require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, job1)) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, job2)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job1)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2)) gatherJobs := func(iter memdb.ResultIterator) []*structs.Job { var jobs []*structs.Job @@ -2784,7 +2849,7 @@ func TestStateStore_JobsByIDPrefix_Namespaces(t *testing.T) { job3 := mock.Job() job3.ID = "riak" job3.Namespace = ns1.Name - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, job3)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, job3)) require.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -2833,10 +2898,10 @@ func TestStateStore_JobsByNamespace(t *testing.T) { _, err = state.JobsByNamespace(watches[1], ns2.Name) require.NoError(t, err) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, job1)) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1002, job2)) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, job3)) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1004, job4)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job1)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1002, nil, job2)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, job3)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1004, nil, job4)) require.True(t, watchFired(watches[0])) require.True(t, watchFired(watches[1])) @@ -2890,7 +2955,7 @@ func TestStateStore_JobsByPeriodic(t *testing.T) { job := mock.Job() nonPeriodic = append(nonPeriodic, job) - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -2900,7 +2965,7 @@ func TestStateStore_JobsByPeriodic(t *testing.T) { job := mock.PeriodicJob() periodic = append(periodic, job) - err := state.UpsertJob(structs.MsgTypeTestSetup, 2000+uint64(i), job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 2000+uint64(i), nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -2963,7 +3028,7 @@ func 
TestStateStore_JobsByScheduler(t *testing.T) { job := mock.Job() serviceJobs = append(serviceJobs, job) - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -2974,7 +3039,7 @@ func TestStateStore_JobsByScheduler(t *testing.T) { job.Status = structs.JobStatusRunning sysJobs = append(sysJobs, job) - err := state.UpsertJob(structs.MsgTypeTestSetup, 2000+uint64(i), job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 2000+uint64(i), nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -3041,7 +3106,7 @@ func TestStateStore_JobsByGC(t *testing.T) { } nonGc[job.ID] = struct{}{} - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job); err != nil { t.Fatalf("err: %v", err) } } @@ -3051,7 +3116,7 @@ func TestStateStore_JobsByGC(t *testing.T) { job.Type = structs.JobTypeBatch gc[job.ID] = struct{}{} - if err := state.UpsertJob(structs.MsgTypeTestSetup, 2000+uint64(i), job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 2000+uint64(i), nil, job); err != nil { t.Fatalf("err: %v", err) } @@ -3645,11 +3710,11 @@ func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { controllerJob := mock.CSIPluginJob(structs.CSIPluginTypeController, plugID) controllerJobID = controllerJob.ID - err = store.UpsertJob(structs.MsgTypeTestSetup, nextIndex(store), controllerJob) + err = store.UpsertJob(structs.MsgTypeTestSetup, nextIndex(store), nil, controllerJob) nodeJob := mock.CSIPluginJob(structs.CSIPluginTypeNode, plugID) nodeJobID = nodeJob.ID - err = store.UpsertJob(structs.MsgTypeTestSetup, nextIndex(store), nodeJob) + err = store.UpsertJob(structs.MsgTypeTestSetup, nextIndex(store), nil, nodeJob) // plugins created, but no fingerprints or allocs yet // note: there's no job summary yet, but we know the task @@ -3979,7 +4044,7 @@ func TestStateStore_LatestIndex(t *testing.T) { } exp := uint64(2000) - if err := state.UpsertJob(structs.MsgTypeTestSetup, exp, mock.Job()); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, exp, nil, mock.Job()); err != nil { t.Fatalf("err: %v", err) } @@ -4260,7 +4325,7 @@ func TestStateStore_UpsertEvals_Eval_ChildJob(t *testing.T) { state := testStateStore(t) parent := mock.Job() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, parent); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent); err != nil { t.Fatalf("err: %v", err) } @@ -4268,7 +4333,7 @@ func TestStateStore_UpsertEvals_Eval_ChildJob(t *testing.T) { child.Status = "" child.ParentID = parent.ID - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, child); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child); err != nil { t.Fatalf("err: %v", err) } @@ -4498,7 +4563,7 @@ func TestStateStore_DeleteEval_ChildJob(t *testing.T) { state := testStateStore(t) parent := mock.Job() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, parent); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent); err != nil { t.Fatalf("err: %v", err) } @@ -4506,7 +4571,7 @@ func TestStateStore_DeleteEval_ChildJob(t *testing.T) { child.Status = "" child.ParentID = parent.ID - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, child); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child); err != nil { 
t.Fatalf("err: %v", err) } @@ -5197,12 +5262,12 @@ func TestStateStore_UpdateAllocsFromClient(t *testing.T) { must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 997, node)) parent := mock.Job() - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 998, parent)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent)) child := mock.Job() child.Status = "" child.ParentID = parent.ID - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, child)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child)) alloc := mock.Alloc() alloc.NodeID = node.ID @@ -5267,8 +5332,8 @@ func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { alloc2.NodeID = node.ID must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc1.Job)) - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc2.Job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc1.Job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc2.Job)) must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) // Create watchsets so we can test that update fires the watch @@ -5372,7 +5437,7 @@ func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { alloc.NodeID = node.ID must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc.Job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) // Create the delta updates @@ -5442,7 +5507,7 @@ func TestStateStore_UpdateAllocsFromClient_Deployment(t *testing.T) { alloc.DeploymentID = deployment.ID must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc.Job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) must.NoError(t, state.UpsertDeployment(1000, deployment)) must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) @@ -5494,7 +5559,7 @@ func TestStateStore_UpdateAllocsFromClient_DeploymentStateMerges(t *testing.T) { } must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc.Job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) must.NoError(t, state.UpsertDeployment(1000, deployment)) must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) @@ -5542,8 +5607,8 @@ func TestStateStore_UpdateAllocsFromClient_UpdateNodes(t *testing.T) { must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, node1)) must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1001, node2)) must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1002, node3)) - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, alloc1.Job)) - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1004, alloc2.Job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, alloc1.Job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1004, nil, alloc2.Job)) must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, []*structs.Allocation{alloc1, alloc2, alloc3})) // Create watches to make sure they 
fire when nodes are updated. @@ -5620,7 +5685,7 @@ func TestStateStore_UpsertAlloc_Alloc(t *testing.T) { state := testStateStore(t) alloc := mock.Alloc() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc.Job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job); err != nil { t.Fatalf("err: %v", err) } @@ -5703,7 +5768,7 @@ func TestStateStore_UpsertAlloc_Deployment(t *testing.T) { deployment.TaskGroups[alloc.TaskGroup].ProgressDeadline = pdeadline alloc.DeploymentID = deployment.ID - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc.Job)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) require.Nil(state.UpsertDeployment(1000, deployment)) // Create a watch set so we can test that update fires the watch @@ -5765,8 +5830,8 @@ func TestStateStore_UpsertAlloc_AllocsByNamespace(t *testing.T) { alloc4.Job.Namespace = ns2.Name require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc1.Job)) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, alloc3.Job)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc1.Job)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, alloc3.Job)) // Create watchsets so we can test that update fires the watch watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()} @@ -5840,13 +5905,13 @@ func TestStateStore_UpsertAlloc_ChildJob(t *testing.T) { state := testStateStore(t) parent := mock.Job() - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 998, parent)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent)) child := mock.Job() child.Status = "" child.ParentID = parent.ID - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, child)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child)) alloc := mock.Alloc() alloc.JobID = child.ID @@ -5883,7 +5948,7 @@ func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { state := testStateStore(t) alloc := mock.Alloc() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc.Job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job); err != nil { t.Fatalf("err: %v", err) } @@ -5985,7 +6050,7 @@ func TestStateStore_UpdateAlloc_Lost(t *testing.T) { alloc := mock.Alloc() alloc.ClientStatus = "foo" - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc.Job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job); err != nil { t.Fatalf("err: %v", err) } @@ -6023,7 +6088,7 @@ func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { // Upsert a job state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID)) - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc.Job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job); err != nil { t.Fatalf("err: %v", err) } @@ -6066,7 +6131,7 @@ func TestStateStore_UpdateAllocDesiredTransition(t *testing.T) { state := testStateStore(t) alloc := mock.Alloc() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc.Job)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) t1 := &structs.DesiredTransition{ @@ -6134,7 +6199,7 @@ func TestStateStore_JobSummary(t *testing.T) { // Add a job job := mock.Job() - 
state.UpsertJob(structs.MsgTypeTestSetup, 900, job) + state.UpsertJob(structs.MsgTypeTestSetup, 900, nil, job) // Get the job back ws := memdb.NewWatchSet() @@ -6216,7 +6281,7 @@ func TestStateStore_JobSummary(t *testing.T) { // Re-register the same job job1 := mock.Job() job1.ID = job.ID - state.UpsertJob(structs.MsgTypeTestSetup, 1000, job1) + state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job1) outJob2, _ := state.JobByID(ws, job1.Namespace, job1.ID) if outJob2.CreateIndex != 1000 { t.Fatalf("bad create index: %v", outJob2.CreateIndex) @@ -6263,7 +6328,7 @@ func TestStateStore_ReconcileJobSummary(t *testing.T) { tg2 := alloc.Job.TaskGroups[0].Copy() tg2.Name = "db" alloc.Job.TaskGroups = append(alloc.Job.TaskGroups, tg2) - state.UpsertJob(structs.MsgTypeTestSetup, 100, alloc.Job) + state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, alloc.Job) // Create one more alloc for the db task group alloc2 := mock.Alloc() @@ -6366,7 +6431,7 @@ func TestStateStore_ReconcileParentJobSummary(t *testing.T) { Payload: "random", } job1.TaskGroups[0].Count = 1 - state.UpsertJob(structs.MsgTypeTestSetup, 100, job1) + state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job1) // Make a child job childJob := job1.Copy() @@ -6388,7 +6453,7 @@ func TestStateStore_ReconcileParentJobSummary(t *testing.T) { alloc2.JobID = childJob.ID alloc2.ClientStatus = structs.AllocClientStatusFailed - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 110, childJob)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 110, nil, childJob)) require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 111, []*structs.Allocation{alloc, alloc2})) // Make the summary incorrect in the state store @@ -6445,7 +6510,7 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { state := testStateStore(t) alloc := mock.Alloc() - state.UpsertJob(structs.MsgTypeTestSetup, 100, alloc.Job) + state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, alloc.Job) state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc}) // Delete the job @@ -6461,7 +6526,7 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { } // Re-Register the job - state.UpsertJob(structs.MsgTypeTestSetup, 500, alloc.Job) + state.UpsertJob(structs.MsgTypeTestSetup, 500, nil, alloc.Job) // Update the alloc again alloc2 := alloc.Copy() @@ -6675,7 +6740,7 @@ func TestStateStore_AllocsForRegisteredJob(t *testing.T) { job := mock.Job() job.ID = "foo" - state.UpsertJob(structs.MsgTypeTestSetup, 100, job) + state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) for i := 0; i < 3; i++ { alloc := mock.Alloc() alloc.Job = job @@ -6693,7 +6758,7 @@ func TestStateStore_AllocsForRegisteredJob(t *testing.T) { job1 := mock.Job() job1.ID = "foo" job1.CreateIndex = 50 - state.UpsertJob(structs.MsgTypeTestSetup, 300, job1) + state.UpsertJob(structs.MsgTypeTestSetup, 300, nil, job1) for i := 0; i < 4; i++ { alloc := mock.Alloc() alloc.Job = job1 @@ -7328,7 +7393,7 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { t.Fatalf("bad: %v", err) } - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { t.Fatalf("err: %v", err) } @@ -7461,7 +7526,7 @@ func TestJobSummary_UpdateClientStatus(t *testing.T) { alloc3.Job = job alloc3.JobID = job.ID - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) if err != nil { t.Fatalf("err: %v", err) } @@ -7637,7 +7702,7 @@ func 
TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) { // Insert a job job := mock.Job() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, job); err != nil { t.Fatalf("bad: %v", err) } @@ -7694,9 +7759,9 @@ func TestStateStore_UpdateJobStability(t *testing.T) { // Insert a job twice to get two versions job := mock.Job() - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1, job)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, job)) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 2, job.Copy())) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 2, nil, job.Copy())) // Update the stability to true err := state.UpdateJobStability(3, job.Namespace, job.ID, 0, true) @@ -7777,7 +7842,7 @@ func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { // Create a job j := mock.Job() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1, j)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, j)) // Create a deployment d := mock.Deployment() @@ -7826,7 +7891,7 @@ func TestStateStore_UpsertDeploymentPromotion_NoCanaries(t *testing.T) { // Create a job j := mock.Job() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1, j)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, j)) // Create a deployment d := mock.Deployment() @@ -7858,7 +7923,7 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { tg2 := tg1.Copy() tg2.Name = "foo" j.TaskGroups = append(j.TaskGroups, tg2) - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, j); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, j); err != nil { t.Fatalf("bad: %v", err) } @@ -7958,7 +8023,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { tg2 := tg1.Copy() tg2.Name = "foo" j.TaskGroups = append(j.TaskGroups, tg2) - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1, j)) + require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, j)) // Create a deployment d := mock.Deployment() @@ -8131,7 +8196,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { // Create a Job job := mock.Job() - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 3, job)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 3, nil, job)) // Create alloc with canary status a := mock.Alloc() @@ -8201,7 +8266,7 @@ func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { // Create a Job job := mock.Job() - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 3, job)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 3, nil, job)) // Create alloc with canary status a := mock.Alloc() @@ -9638,7 +9703,7 @@ func TestStateStore_UpsertJob_PreserveScalingPolicyIDsAndIndex(t *testing.T) { job, policy := mock.JobWithScalingPolicy() var newIndex uint64 = 1000 - err := state.UpsertJob(structs.MsgTypeTestSetup, newIndex, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, newIndex, nil, job) require.NoError(err) ws := memdb.NewWatchSet() @@ -9656,7 +9721,7 @@ func TestStateStore_UpsertJob_PreserveScalingPolicyIDsAndIndex(t *testing.T) { // update the job job.Meta["new-meta"] = "new-value" newIndex += 100 - err = state.UpsertJob(structs.MsgTypeTestSetup, newIndex, job) + err = state.UpsertJob(structs.MsgTypeTestSetup, newIndex, nil, job) require.NoError(err) require.False(watchFired(ws), "watch should not have fired") @@ -9683,7 +9748,7 @@ func 
TestStateStore_UpsertJob_UpdateScalingPolicy(t *testing.T) { job, policy := mock.JobWithScalingPolicy() var oldIndex uint64 = 1000 - require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, oldIndex, job)) + require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, oldIndex, nil, job)) ws := memdb.NewWatchSet() p1, err := state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type) @@ -9702,7 +9767,7 @@ func TestStateStore_UpsertJob_UpdateScalingPolicy(t *testing.T) { newPolicy := p1.Copy() newPolicy.Policy["new-field"] = "new-value" job.TaskGroups[0].Scaling = newPolicy - require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, oldIndex+100, job)) + require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, oldIndex+100, nil, job)) require.True(watchFired(ws), "watch should have fired") p2, err := state.ScalingPolicyByTargetAndType(nil, policy.Target, policy.Type) @@ -9782,7 +9847,7 @@ func TestStateStore_StopJob_DeleteScalingPolicies(t *testing.T) { job := mock.Job() - err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.NoError(err) policy := mock.ScalingPolicy() @@ -9803,7 +9868,7 @@ func TestStateStore_StopJob_DeleteScalingPolicies(t *testing.T) { job, err = state.JobByID(nil, job.Namespace, job.ID) require.NoError(err) job.Stop = true - err = state.UpsertJob(structs.MsgTypeTestSetup, 1200, job) + err = state.UpsertJob(structs.MsgTypeTestSetup, 1200, nil, job) require.NoError(err) // Ensure: @@ -9837,7 +9902,7 @@ func TestStateStore_UnstopJob_UpsertScalingPolicies(t *testing.T) { require.Nil(list.Next()) // upsert a stopped job, verify that we don't fire the watcher or add any scaling policies - err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) + err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) require.NoError(err) require.True(watchFired(ws)) list, err = state.ScalingPolicies(ws) @@ -9850,7 +9915,7 @@ func TestStateStore_UnstopJob_UpsertScalingPolicies(t *testing.T) { require.NoError(err) // Unstop this job, say you'll run it again... 
 	job.Stop = false
-	err = state.UpsertJob(structs.MsgTypeTestSetup, 1100, job)
+	err = state.UpsertJob(structs.MsgTypeTestSetup, 1100, nil, job)
 	require.NoError(err)
 
 	// Ensure the scaling policy still exists, watch was not fired, index was not advanced
@@ -9872,7 +9937,7 @@ func TestStateStore_DeleteJob_DeleteScalingPolicies(t *testing.T) {
 
 	job := mock.Job()
 
-	err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job)
+	err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
 	require.NoError(err)
 
 	policy := mock.ScalingPolicy()
@@ -9902,10 +9967,10 @@ func TestStateStore_DeleteJob_DeleteScalingPoliciesPrefixBug(t *testing.T) {
 	state := testStateStore(t)
 
 	job := mock.Job()
-	require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1000, job))
+	require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job))
 	job2 := job.Copy()
 	job2.ID = job.ID + "-but-longer"
-	require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1001, job2))
+	require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2))
 
 	policy := mock.ScalingPolicy()
 	policy.Target[structs.ScalingTargetJob] = job.ID
@@ -9940,7 +10005,7 @@ func TestStateStore_DeleteJob_ScalingPolicyIndexNoop(t *testing.T) {
 	prevIndex, err := state.Index("scaling_policy")
 	require.NoError(err)
 
-	err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, job)
+	err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
 	require.NoError(err)
 
 	newIndex, err := state.Index("scaling_policy")
@@ -10458,7 +10523,7 @@ func TestStateSnapshot_DenormalizeAllocationDiffSlice_AllocDoesNotExist(t *testi
 	require := require.New(t)
 
 	// Insert job
-	err := state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc.Job)
+	err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)
 	require.NoError(err)
 
 	allocDiffs := []*structs.AllocationDiff{
diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go
index 2c7cb1f90..caa3de67c 100644
--- a/nomad/structs/structs.go
+++ b/nomad/structs/structs.go
@@ -694,6 +694,9 @@ type NodeSpecificRequest struct {
 // JobRegisterRequest is used for Job.Register endpoint
 // to register a job as being a schedulable entity.
 type JobRegisterRequest struct {
+	Submission *JobSubmission
+
+	// Job is the parsed job, no matter what form the input was in.
 	Job *Job
 
 	// If EnforceIndex is set then the job will only be registered if the passed
@@ -792,6 +795,23 @@ type EvalOptions struct {
 	ForceReschedule bool
 }
 
+// JobSubmissionRequest is used to query a JobSubmission object associated with a
+// job at a specific version.
+type JobSubmissionRequest struct {
+	JobID   string
+	Version uint64
+
+	QueryOptions
+}
+
+// JobSubmissionResponse contains a JobSubmission object, which may be nil
+// if no submission data is available.
+type JobSubmissionResponse struct {
+	Submission *JobSubmission
+
+	QueryMeta
+}
+
 // JobSpecificRequest is used when we just need to specify a target job
 type JobSpecificRequest struct {
 	JobID string
@@ -4249,6 +4269,44 @@ const (
 	JobTrackedScalingEvents = 20
 )
 
+// A JobSubmission contains the original job specification, along with the Variables
+// submitted with the job.
+type JobSubmission struct {
+	// Source contains the original job definition (may be hcl1, hcl2, or json)
+	Source string
+
+	// Format indicates whether the original job was hcl1, hcl2, or json.
+	Format string
+
+	// VariableFlags contain the CLI "-var" flag arguments as submitted with the
+	// job (hcl2 only).
+ VariableFlags map[string]string + + // Variables contains the opaque variable blob that was input from the + // webUI (hcl2 only). + Variables string + + // Namespace is managed internally, do not set. + // + // The namespace the associated job belongs to. + Namespace string + + // JobID is managed internally, not set. + // + // The job.ID field. + JobID string + + // Version is managed internally, not set. + // + // The version of the Job this submission is associated with. + Version uint64 + + // JobModifyIndex is managed internally, not set. + // + // The raft index the Job this submission is associated with. + JobModifyIndex uint64 +} + // Job is the scope of a scheduling request to Nomad. It is the largest // scoped object, and is a named collection of task groups. Each task group // is further composed of tasks. A task group (TG) is the unit of scheduling diff --git a/nomad/system_endpoint_test.go b/nomad/system_endpoint_test.go index 5691ce723..e67a9b59b 100644 --- a/nomad/system_endpoint_test.go +++ b/nomad/system_endpoint_test.go @@ -31,7 +31,7 @@ func TestSystemEndpoint_GarbageCollect(t *testing.T) { job := mock.Job() job.Type = structs.JobTypeBatch job.Stop = true - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { t.Fatalf("UpsertJob() failed: %v", err) } @@ -126,7 +126,7 @@ func TestSystemEndpoint_ReconcileSummaries(t *testing.T) { state := s1.fsm.State() s1.fsm.State() job := mock.Job() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job); err != nil { + if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { t.Fatalf("UpsertJob() failed: %v", err) } diff --git a/nomad/testing.go b/nomad/testing.go index 97ee564b4..7d1753982 100644 --- a/nomad/testing.go +++ b/nomad/testing.go @@ -109,6 +109,9 @@ func TestConfigForServer(t testing.T) *Config { } config.SerfConfig.MemberlistConfig.BindPort = ports[1] + // max job submission source size + config.JobMaxSourceSize = 1e6 + return config } diff --git a/nomad/volumewatcher/volumes_watcher_test.go b/nomad/volumewatcher/volumes_watcher_test.go index 5e5048134..fd02f72f3 100644 --- a/nomad/volumewatcher/volumes_watcher_test.go +++ b/nomad/volumewatcher/volumes_watcher_test.go @@ -169,7 +169,7 @@ func TestVolumeWatch_StartStop(t *testing.T) { alloc2.Job = alloc1.Job alloc2.ClientStatus = structs.AllocClientStatusRunning index++ - err := srv.State().UpsertJob(structs.MsgTypeTestSetup, index, alloc1.Job) + err := srv.State().UpsertJob(structs.MsgTypeTestSetup, index, nil, alloc1.Job) require.NoError(t, err) index++ err = srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc1, alloc2}) diff --git a/nomad/worker_test.go b/nomad/worker_test.go index e85d6c684..0e2d4f96f 100644 --- a/nomad/worker_test.go +++ b/nomad/worker_test.go @@ -471,7 +471,7 @@ func TestWorker_SubmitPlan(t *testing.T) { job := mock.Job() eval1 := mock.Eval() eval1.JobID = job.ID - s1.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 1000, job) + s1.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) s1.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1}) // Create the register request @@ -541,7 +541,7 @@ func TestWorker_SubmitPlanNormalizedAllocations(t *testing.T) { job := mock.Job() eval1 := mock.Eval() eval1.JobID = job.ID - s1.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 0, job) + s1.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 0, nil, 
job) s1.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 0, []*structs.Evaluation{eval1}) stoppedAlloc := mock.Alloc() @@ -592,7 +592,7 @@ func TestWorker_SubmitPlan_MissingNodeRefresh(t *testing.T) { // Create the job job := mock.Job() - s1.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 1000, job) + s1.fsm.State().UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) // Create the register request eval1 := mock.Eval() diff --git a/scheduler/benchmarks/benchmarks_test.go b/scheduler/benchmarks/benchmarks_test.go index 664d1da3c..a39acab41 100644 --- a/scheduler/benchmarks/benchmarks_test.go +++ b/scheduler/benchmarks/benchmarks_test.go @@ -127,7 +127,7 @@ func BenchmarkServiceScheduler(b *testing.B) { } func upsertJob(h *scheduler.Harness, job *structs.Job) *structs.Evaluation { - err := h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job) + err := h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job) if err != nil { panic(err) } diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index 9710c16d0..64111e536 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -400,7 +400,7 @@ func TestCSIVolumeChecker(t *testing.T) { Source: vid2, }, } - err = state.UpsertJob(structs.MsgTypeTestSetup, index, alloc.Job) + err = state.UpsertJob(structs.MsgTypeTestSetup, index, nil, alloc.Job) require.NoError(t, err) index++ summary := mock.JobSummary(alloc.JobID) diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go index b9b73ac5a..7c125e7fb 100644 --- a/scheduler/generic_sched_test.go +++ b/scheduler/generic_sched_test.go @@ -37,7 +37,7 @@ func TestServiceSched_JobRegister(t *testing.T) { // Create a job job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -191,7 +191,7 @@ func TestServiceSched_JobRegister_MemoryMaxHonored(t *testing.T) { node := mock.Node() require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -251,7 +251,7 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { // Create a job job := mock.Job() job.TaskGroups[0].EphemeralDisk.Sticky = true - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -284,7 +284,7 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { // Update the job to force a rolling upgrade updated := job.Copy() updated.TaskGroups[0].Tasks[0].Resources.CPU += 10 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), updated)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, updated)) // Create a mock evaluation to handle the update eval = &structs.Evaluation{ @@ -345,7 +345,7 @@ func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { job := mock.Job() job.TaskGroups[0].Count = 2 job.TaskGroups[0].EphemeralDisk.SizeMB = 88 * 1024 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, 
h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -423,7 +423,7 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { job := mock.Job() job.TaskGroups[0].Count = 11 job.Constraints = append(job.Constraints, &structs.Constraint{Operand: structs.ConstraintDistinctHosts}) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -517,7 +517,7 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { LTarget: "${meta.rack}", RTarget: "2", }) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -614,7 +614,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { job.TaskGroups[1].Name = "tg2" job.TaskGroups[1].Count = 2 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -685,7 +685,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) Operand: structs.ConstraintDistinctProperty, LTarget: "${node.unique.id}", }) - assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job), "UpsertJob") + assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job), "UpsertJob") // Create some nodes var nodes []*structs.Node @@ -710,7 +710,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) // Update the count job2 := job.Copy() job2.TaskGroups[0].Count = 6 - assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2), "UpsertJob") + assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2), "UpsertJob") // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -787,7 +787,7 @@ func TestServiceSched_Spread(t *testing.T) { }, }, }) - assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job), "UpsertJob") + assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job), "UpsertJob") // Create some nodes, half in dc2 var nodes []*structs.Node nodeMap := make(map[string]*structs.Node) @@ -870,7 +870,7 @@ func TestServiceSched_EvenSpread(t *testing.T) { Attribute: "${node.datacenter}", Weight: 100, }) - assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job), "UpsertJob") + assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job), "UpsertJob") // Create some nodes, half in dc2 var nodes []*structs.Node nodeMap := make(map[string]*structs.Node) @@ -943,7 +943,7 @@ func TestServiceSched_JobRegister_Annotate(t *testing.T) { // Create a job job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1025,7 +1025,7 @@ func TestServiceSched_JobRegister_CountZero(t *testing.T) { // Create a job 
and set the task group count to zero. job := mock.Job() job.TaskGroups[0].Count = 0 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1070,7 +1070,7 @@ func TestServiceSched_JobRegister_AllocFail(t *testing.T) { // Create NO nodes // Create a job job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1160,7 +1160,7 @@ func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) { // Create a jobs job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1262,7 +1262,7 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { tg2.Name = "web2" tg2.Constraints[1].RTarget = "class_1" job.TaskGroups = append(job.TaskGroups, tg2) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1341,7 +1341,7 @@ func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) { // Create a job and set the task group count to zero. job := mock.Job() job.TaskGroups[0].Count = 0 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock blocked evaluation eval := &structs.Evaluation{ @@ -1384,7 +1384,7 @@ func TestServiceSched_Plan_Partial_Progress(t *testing.T) { job := mock.Job() job.TaskGroups[0].Count = 3 job.TaskGroups[0].Tasks[0].Resources.CPU = 3600 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1449,7 +1449,7 @@ func TestServiceSched_EvaluateBlockedEval(t *testing.T) { // Create a job job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock blocked evaluation eval := &structs.Evaluation{ @@ -1502,7 +1502,7 @@ func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) { // Create a job and set the task group count to zero. 
job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock blocked evaluation eval := &structs.Evaluation{ @@ -1591,7 +1591,7 @@ func TestServiceSched_JobModify(t *testing.T) { // Generate a fake job with allocations job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -1624,7 +1624,7 @@ func TestServiceSched_JobModify(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1701,7 +1701,7 @@ func TestServiceSched_JobModify_Datacenters(t *testing.T) { job := mock.Job() job.TaskGroups[0].Count = 3 job.Datacenters = []string{"dc1", "dc2", "dc3"} - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 3; i++ { @@ -1718,7 +1718,7 @@ func TestServiceSched_JobModify_Datacenters(t *testing.T) { job2 := job.Copy() job2.TaskGroups[0].Count = 4 job2.Datacenters = []string{"dc1", "dc2"} - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1777,7 +1777,7 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { job := mock.Job() job.TaskGroups[0].Tasks[0].Resources.CPU = 256 job2 := job.Copy() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation alloc := mock.Alloc() @@ -1791,7 +1791,7 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { // Update the job to count 3 job2.TaskGroups[0].Count = 3 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1872,7 +1872,7 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { // Generate a fake job with allocations job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -1902,7 +1902,7 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { job2 := mock.Job() job2.ID = job.ID job2.TaskGroups[0].Count = 0 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1974,7 +1974,7 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { 
// Generate a fake job with allocations job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -2000,7 +2000,7 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -2103,7 +2103,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { job := mock.Job() job.TaskGroups[0].Count = 1 job.TaskGroups[0].Tasks[0].Resources = request - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.Alloc() alloc.AllocatedResources = allocated @@ -2127,7 +2127,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) eval := &structs.Evaluation{ Namespace: structs.DefaultNamespace, @@ -2204,7 +2204,7 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) { // Generate a fake job with allocations job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -2231,7 +2231,7 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -2332,7 +2332,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { job := mock.Job() d := mock.Deployment() d.JobID = job.ID - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d)) taskName := job.TaskGroups[0].Tasks[0].Name @@ -2374,7 +2374,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { MinHealthyTime: 10 * time.Second, HealthyDeadline: 10 * time.Minute, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -2481,7 +2481,7 @@ func TestServiceSched_JobModify_InPlace08(t *testing.T) { // Generate a fake job with 0.8 allocations job := mock.Job() job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, 
h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create 0.8 alloc alloc := mock.Alloc() @@ -2495,7 +2495,7 @@ func TestServiceSched_JobModify_InPlace08(t *testing.T) { job2 := job.Copy() job2.TaskGroups[0].Tasks[0].Services[0].Tags[0] = "newtag" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -2571,7 +2571,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { Operand: structs.ConstraintDistinctProperty, LTarget: "${meta.rack}", }) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) oldJob := job.Copy() oldJob.JobModifyIndex -= 1 @@ -2689,7 +2689,7 @@ func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -2756,7 +2756,7 @@ func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) { // Update the job, such that it cannot be done in-place job2 := job.Copy() job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create and process a mock evaluation eval = &structs.Evaluation{ @@ -2869,7 +2869,7 @@ func TestServiceSched_JobDeregister_Stopped(t *testing.T) { // Generate a fake job with allocations job := mock.Job() job.Stop = true - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -2992,7 +2992,7 @@ func TestServiceSched_NodeDown(t *testing.T) { // Generate a fake job with allocations and an update policy. job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.Alloc() alloc.Job = job @@ -3092,7 +3092,7 @@ func TestServiceSched_StopAfterClientDisconnect(t *testing.T) { job := mock.Job() job.TaskGroups[0].Count = 1 job.TaskGroups[0].StopAfterClientDisconnect = &tc.stop - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Alloc for the running group alloc := mock.Alloc() @@ -3226,7 +3226,7 @@ func TestServiceSched_NodeUpdate(t *testing.T) { // Generate a fake job with allocations and an update policy. job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -3288,7 +3288,7 @@ func TestServiceSched_NodeDrain(t *testing.T) { // Generate a fake job with allocations and an update policy. 
job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -3366,7 +3366,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { // Generate a fake job with allocations job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -3480,7 +3480,7 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { // Generate a fake job with allocations and an update policy. job := mock.Job() job.TaskGroups[0].Count = 2 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -3539,7 +3539,7 @@ func TestServiceSched_NodeDrain_TaskHandle(t *testing.T) { // Generate a fake job with allocations and an update policy. job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -3629,7 +3629,7 @@ func TestServiceSched_RetryLimit(t *testing.T) { // Create a job job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -3693,7 +3693,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -3808,7 +3808,7 @@ func TestServiceSched_Reschedule_Later(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -3897,7 +3897,7 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -4036,7 +4036,7 @@ func TestServiceSched_Reschedule_PruneEvents(t *testing.T) { Delay: 5 * time.Second, Unlimited: true, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -4170,7 +4170,7 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { Interval: 15 * time.Minute, } jobIndex := h.NextIndex() - require.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, jobIndex, job)) + require.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, jobIndex, nil, job)) 
deployment := mock.Deployment() deployment.JobID = job.ID @@ -4248,7 +4248,7 @@ func TestBatchSched_Run_CompleteAlloc(t *testing.T) { job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a complete alloc alloc := mock.Alloc() @@ -4307,7 +4307,7 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) { job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) tgName := job.TaskGroups[0].Name now := time.Now() @@ -4380,7 +4380,7 @@ func TestBatchSched_Run_LostAlloc(t *testing.T) { job.ID = "my-job" job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 3 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Desired = 3 // Mark one as lost and then schedule @@ -4467,7 +4467,7 @@ func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) { job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) tgName := job.TaskGroups[0].Name now := time.Now() @@ -4525,7 +4525,7 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) { job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a successful alloc alloc := mock.Alloc() @@ -4600,7 +4600,7 @@ func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) { // Generate a fake job with allocations job := mock.Job() job.Type = structs.JobTypeBatch - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -4654,7 +4654,7 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { // Generate a fake job with allocations job := mock.Job() job.Type = structs.JobTypeBatch - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -4674,7 +4674,7 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { job2.Type = structs.JobTypeBatch job2.Version++ job2.TaskGroups[0].Tasks[0].Env = map[string]string{"foo": "bar"} - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) allocs = nil for i := 0; i < 10; i++ { @@ -4740,7 +4740,7 @@ func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) { job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, 
h.NextIndex(), nil, job)) // Create a running alloc alloc := mock.Alloc() @@ -4755,7 +4755,7 @@ func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) { job2 := job.Copy() job2.TaskGroups[0].Tasks[0].Env = map[string]string{"foo": "bar"} job2.Version++ - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -4813,7 +4813,7 @@ func TestBatchSched_NodeDrain_Complete(t *testing.T) { job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a complete alloc alloc := mock.Alloc() @@ -4876,7 +4876,7 @@ func TestBatchSched_ScaleDown_SameName(t *testing.T) { job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) scoreMetric := &structs.AllocMetric{ NodesEvaluated: 10, @@ -4906,7 +4906,7 @@ func TestBatchSched_ScaleDown_SameName(t *testing.T) { // Update the job's modify index to force an inplace upgrade updatedJob := job.Copy() updatedJob.JobModifyIndex = job.JobModifyIndex + 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), updatedJob)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, updatedJob)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5029,7 +5029,7 @@ func TestGenericSched_AllocFit_Lifecycle(t *testing.T) { // Create a job with sidecar & init tasks job := mock.VariableLifecycleJob(testCase.TaskResources, testCase.MainTaskCount, testCase.InitTaskCount, testCase.SideTaskCount) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5082,7 +5082,7 @@ func TestGenericSched_AllocFit_MemoryOversubscription(t *testing.T) { job.TaskGroups[0].Tasks[0].Resources.MemoryMB = 200 job.TaskGroups[0].Tasks[0].Resources.MemoryMaxMB = 500 job.TaskGroups[0].Tasks[0].Resources.DiskMB = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5126,7 +5126,7 @@ func TestGenericSched_ChainedAlloc(t *testing.T) { // Create a job job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5157,7 +5157,7 @@ func TestGenericSched_ChainedAlloc(t *testing.T) { job1.ID = job.ID job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar" job1.TaskGroups[0].Count = 12 - require.NoError(t, h1.State.UpsertJob(structs.MsgTypeTestSetup, h1.NextIndex(), job1)) + require.NoError(t, h1.State.UpsertJob(structs.MsgTypeTestSetup, h1.NextIndex(), nil, job1)) // Create a mock evaluation to update the job eval1 := 
&structs.Evaluation{ @@ -5220,7 +5220,7 @@ func TestServiceSched_NodeDrain_Sticky(t *testing.T) { alloc.Job.TaskGroups[0].Count = 1 alloc.Job.TaskGroups[0].EphemeralDisk.Sticky = true alloc.DesiredTransition.Migrate = pointer.Of(true) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), alloc.Job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, alloc.Job)) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain @@ -5277,7 +5277,7 @@ func TestServiceSched_CancelDeployment_Stopped(t *testing.T) { job.JobModifyIndex = job.CreateIndex + 1 job.ModifyIndex = job.CreateIndex + 1 job.Stop = true - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a deployment d := mock.Deployment() @@ -5350,7 +5350,7 @@ func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) { // Generate a fake job job := mock.Job() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a deployment for an old version of the job d := mock.Deployment() @@ -5358,7 +5358,7 @@ func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) { require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d)) // Upsert again to bump job version - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to kick the job eval := &structs.Evaluation{ @@ -5705,7 +5705,7 @@ func TestServiceSched_Preemption(t *testing.T) { r1 := job1.TaskGroups[0].Tasks[0].Resources r1.CPU = 500 r1.MemoryMB = 1024 - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job1)) + require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1)) job2 := mock.Job() job2.TaskGroups[0].Count = 1 @@ -5714,7 +5714,7 @@ func TestServiceSched_Preemption(t *testing.T) { r2 := job2.TaskGroups[0].Tasks[0].Resources r2.CPU = 350 r2.MemoryMB = 512 - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to register the jobs eval1 := &structs.Evaluation{ @@ -5768,7 +5768,7 @@ func TestServiceSched_Preemption(t *testing.T) { r3 := job3.TaskGroups[0].Tasks[0].Resources r3.CPU = 900 r3.MemoryMB = 1700 - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job3)) + require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job3)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -5830,7 +5830,7 @@ func TestServiceSched_Migrate_NonCanary(t *testing.T) { MaxParallel: 1, Canary: 1, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) deployment := &structs.Deployment{ ID: uuid.Generate(), @@ -5905,7 +5905,7 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { MaxParallel: 1, Canary: desiredCanaries, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, 
h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) deployment := &structs.Deployment{ ID: uuid.Generate(), @@ -5937,7 +5937,7 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { job2 := job.Copy() job2.Stable = false job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -6100,7 +6100,7 @@ func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { job := mock.Job() job.Version = 0 job.Stable = true - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) initDeployment := &structs.Deployment{ ID: uuid.Generate(), @@ -6129,7 +6129,7 @@ func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { nj.Version = u.version nj.TaskGroups[0].Tasks[0].Env["version"] = fmt.Sprintf("%v", u.version) nj.TaskGroups[0].Count = 1 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nj)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, nj)) desiredCanaries := 1 if !u.requireCanaries { @@ -6189,7 +6189,7 @@ func TestServiceSched_RunningWithNextAllocation(t *testing.T) { job.Stable = true job.TaskGroups[0].Count = totalCount job.TaskGroups[0].Update = nil - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for i := 0; i < totalCount+1; i++ { @@ -6210,7 +6210,7 @@ func TestServiceSched_RunningWithNextAllocation(t *testing.T) { job2 := job.Copy() job2.Version = 1 job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -6310,7 +6310,7 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { }, } - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -6360,7 +6360,7 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { // Update the job to 5 instances job.TaskGroups[0].Count = 5 - require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a new eval and process it. It should not create a new plan. 
eval.ID = uuid.Generate() @@ -6477,7 +6477,7 @@ func TestServiceSched_CSITopology(t *testing.T) { }, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -6715,7 +6715,7 @@ func initNodeAndAllocs(t *testing.T, h *Harness, allocCount int, job := mock.Job() job.TaskGroups[0].Count = allocCount job.TaskGroups[0].MaxClientDisconnect = &maxClientDisconnect - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) allocs := make([]*structs.Allocation, allocCount) for i := 0; i < allocCount; i++ { diff --git a/scheduler/preemption_test.go b/scheduler/preemption_test.go index 838b453b6..cba1da068 100644 --- a/scheduler/preemption_test.go +++ b/scheduler/preemption_test.go @@ -1466,7 +1466,7 @@ func TestPreemptionMultiple(t *testing.T) { Name: "gpu", Count: 1, }} - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), lowPrioJob)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, lowPrioJob)) allocs := []*structs.Allocation{} allocIDs := map[string]struct{}{} @@ -1495,7 +1495,7 @@ func TestPreemptionMultiple(t *testing.T) { Name: "gpu", Count: 2, }} - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), highPrioJob)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, highPrioJob)) // schedule eval := &structs.Evaluation{ diff --git a/scheduler/scheduler_sysbatch_test.go b/scheduler/scheduler_sysbatch_test.go index 9983a5d38..212e6efa6 100644 --- a/scheduler/scheduler_sysbatch_test.go +++ b/scheduler/scheduler_sysbatch_test.go @@ -30,7 +30,7 @@ func TestSysBatch_JobRegister(t *testing.T) { // Create a job job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -100,7 +100,7 @@ func TestSysBatch_JobRegister_AddNode_Running(t *testing.T) { // Generate a fake sysbatch job with allocations job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -178,7 +178,7 @@ func TestSysBatch_JobRegister_AddNode_Dead(t *testing.T) { // Generate a dead sysbatch job with complete allocations job := mock.SystemBatchJob() job.Status = structs.JobStatusDead // job is dead but not stopped - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -255,7 +255,7 @@ func TestSysBatch_JobModify(t *testing.T) { // Generate a fake job with allocations job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -288,7 +288,7 @@ func TestSysBatch_JobModify(t *testing.T) { 
// Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -344,7 +344,7 @@ func TestSysBatch_JobModify_InPlace(t *testing.T) { nodes := createNodes(t, h, 10) job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -360,7 +360,7 @@ func TestSysBatch_JobModify_InPlace(t *testing.T) { // Update the job job2 := mock.SystemBatchJob() job2.ID = job.ID - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with update eval := &structs.Evaluation{ @@ -481,7 +481,7 @@ func TestSysBatch_JobDeregister_Stopped(t *testing.T) { // Generate a stopped sysbatch job with allocations job := mock.SystemBatchJob() job.Stop = true - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -545,7 +545,7 @@ func TestSysBatch_NodeDown(t *testing.T) { // Generate a sysbatch job allocated on that node job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.SysBatchAlloc() alloc.Job = job @@ -606,7 +606,7 @@ func TestSysBatch_NodeDrain_Down(t *testing.T) { // Generate a sysbatch job allocated on that node. job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.SysBatchAlloc() alloc.Job = job @@ -659,7 +659,7 @@ func TestSysBatch_NodeDrain(t *testing.T) { // Generate a sysbatch job allocated on that node. job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.SysBatchAlloc() alloc.Job = job @@ -716,7 +716,7 @@ func TestSysBatch_NodeUpdate(t *testing.T) { // Generate a sysbatch job allocated on that node. 
job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.SysBatchAlloc() alloc.Job = job @@ -760,7 +760,7 @@ func TestSysBatch_RetryLimit(t *testing.T) { // Create a job job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register eval := &structs.Evaluation{ @@ -811,7 +811,7 @@ func TestSysBatch_Queued_With_Constraints(t *testing.T) { Operand: "=", }, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -858,7 +858,7 @@ func TestSysBatch_Queued_With_Constraints_PartialMatch(t *testing.T) { // Generate a sysbatch job which can't be placed on the node job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -929,7 +929,7 @@ func TestSysBatch_JobConstraint_AddNode(t *testing.T) { // Upsert Job job.TaskGroups = []*structs.TaskGroup{tgA, tgB} - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Evaluate the job eval := &structs.Evaluation{ @@ -1053,7 +1053,7 @@ func TestSysBatch_ExistingAllocNoNodes(t *testing.T) { // Make a sysbatch job job := mock.SystemBatchJob() job.Meta = map[string]string{"version": "1"} - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Evaluate the job eval := &structs.Evaluation{ @@ -1097,7 +1097,7 @@ func TestSysBatch_ExistingAllocNoNodes(t *testing.T) { // Create a new job version, deploy job2 := job.Copy() job2.Meta["version"] = "2" - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Run evaluation as a plan eval3 := &structs.Evaluation{ @@ -1149,7 +1149,7 @@ func TestSysBatch_ConstraintErrors(t *testing.T) { Operand: "=", }) - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Evaluate the job eval := &structs.Evaluation{ @@ -1205,7 +1205,7 @@ func TestSysBatch_ChainedAlloc(t *testing.T) { // Create a sysbatch job job := mock.SystemBatchJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1236,7 +1236,7 @@ func TestSysBatch_ChainedAlloc(t *testing.T) { job1.ID = job.ID job1.TaskGroups[0].Tasks[0].Env = make(map[string]string) job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar" - require.NoError(t, h1.State.UpsertJob(structs.MsgTypeTestSetup, h1.NextIndex(), job1)) + require.NoError(t, 
h1.State.UpsertJob(structs.MsgTypeTestSetup, h1.NextIndex(), nil, job1)) // Insert two more nodes for i := 0; i < 2; i++ { @@ -1314,7 +1314,7 @@ func TestSysBatch_PlanWithDrainedNode(t *testing.T) { tg2.Name = "pinger2" tg2.Constraints[0].RTarget = "blue" job.TaskGroups = append(job.TaskGroups, tg2) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create an allocation on each node alloc := mock.SysBatchAlloc() @@ -1396,7 +1396,7 @@ func TestSysBatch_QueuedAllocsMultTG(t *testing.T) { tg2.Name = "pinger2" tg2.Constraints[0].RTarget = "blue" job.TaskGroups = append(job.TaskGroups, tg2) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1512,7 +1512,7 @@ func TestSysBatch_Preemption(t *testing.T) { }, Shared: structs.AllocatedSharedResources{DiskMB: 5 * 1024}, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job1)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1)) job2 := mock.BatchJob() job2.Type = structs.JobTypeBatch @@ -1543,7 +1543,7 @@ func TestSysBatch_Preemption(t *testing.T) { }, Shared: structs.AllocatedSharedResources{DiskMB: 5 * 1024}, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) job3 := mock.Job() job3.Type = structs.JobTypeBatch @@ -1620,7 +1620,7 @@ func TestSysBatch_Preemption(t *testing.T) { DiskMB: 2 * 1024, }, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job4)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job4)) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc4})) // Create a system job such that it would need to preempt both allocs to succeed @@ -1634,7 +1634,7 @@ func TestSysBatch_Preemption(t *testing.T) { DynamicPorts: []structs.Port{{Label: "http"}}, }}, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ diff --git a/scheduler/scheduler_system_test.go b/scheduler/scheduler_system_test.go index 0d648351b..923fe13ac 100644 --- a/scheduler/scheduler_system_test.go +++ b/scheduler/scheduler_system_test.go @@ -31,7 +31,7 @@ func TestSystemSched_JobRegister(t *testing.T) { // Create a job job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -102,7 +102,7 @@ func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) { // Create a job job := mock.SystemJob() job.TaskGroups[0].EphemeralDisk.Sticky = true - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -178,13 +178,13 @@ 
func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) { // Create a job job := mock.SystemJob() job.TaskGroups[0].EphemeralDisk.SizeMB = 60 * 1024 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create another job with a lot of disk resource ask so that it doesn't fit // the node job1 := mock.SystemJob() job1.TaskGroups[0].EphemeralDisk.SizeMB = 60 * 1024 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job1)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -257,7 +257,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) { svcJob := mock.Job() svcJob.TaskGroups[0].Count = 1 svcJob.TaskGroups[0].Tasks[0].Resources.CPU = 3600 - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), svcJob)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, svcJob)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -277,7 +277,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) { // Create a system job job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval1 := &structs.Evaluation{ @@ -342,7 +342,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) { Operand: "==", } job.Constraints = append(job.Constraints, fooConstraint) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -425,7 +425,7 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) { // Generate a fake job with allocations job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -503,7 +503,7 @@ func TestSystemSched_JobRegister_AllocFail(t *testing.T) { // Create NO nodes // Create a job job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -539,7 +539,7 @@ func TestSystemSched_JobModify(t *testing.T) { // Generate a fake job with allocations job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -571,7 +571,7 @@ func TestSystemSched_JobModify(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with drain eval := 
&structs.Evaluation{ @@ -628,7 +628,7 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) { // Generate a fake job with allocations job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -651,7 +651,7 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -728,7 +728,7 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) { // Generate a fake job with allocations job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -743,7 +743,7 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) { // Update the job job2 := mock.SystemJob() job2.ID = job.ID - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with update eval := &structs.Evaluation{ @@ -821,7 +821,7 @@ func TestSystemSched_JobModify_RemoveDC(t *testing.T) { // Generate a fake job with allocations job := mock.SystemJob() job.Datacenters = []string{"dc1", "dc2"} - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -837,7 +837,7 @@ func TestSystemSched_JobModify_RemoveDC(t *testing.T) { // Update the job job2 := job.Copy() job2.Datacenters = []string{"dc1"} - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Create a mock evaluation to deal with update eval := &structs.Evaluation{ @@ -959,7 +959,7 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) { // Generate a fake job with allocations job := mock.SystemJob() job.Stop = true - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) var allocs []*structs.Allocation for _, node := range nodes { @@ -1023,7 +1023,7 @@ func TestSystemSched_NodeDown(t *testing.T) { // Generate a fake job allocated on that node. job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.Alloc() alloc.Job = job @@ -1084,7 +1084,7 @@ func TestSystemSched_NodeDrain_Down(t *testing.T) { // Generate a fake job allocated on that node. 
job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.Alloc() alloc.Job = job @@ -1137,7 +1137,7 @@ func TestSystemSched_NodeDrain(t *testing.T) { // Generate a fake job allocated on that node. job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.Alloc() alloc.Job = job @@ -1194,7 +1194,7 @@ func TestSystemSched_NodeUpdate(t *testing.T) { // Generate a fake job allocated on that node. job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc := mock.Alloc() alloc.Job = job @@ -1238,7 +1238,7 @@ func TestSystemSched_RetryLimit(t *testing.T) { // Create a job job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1285,7 +1285,7 @@ func TestSystemSched_Queued_With_Constraints(t *testing.T) { // Generate a system job which can't be placed on the node job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -1355,7 +1355,7 @@ func TestSystemSched_JobConstraint_AddNode(t *testing.T) { // Upsert Job job.TaskGroups = []*structs.TaskGroup{tgA, tgB} - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Evaluate the job eval := &structs.Evaluation{ @@ -1476,7 +1476,7 @@ func TestSystemSched_ExistingAllocNoNodes(t *testing.T) { // Make a job job := mock.SystemJob() - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Evaluate the job eval := &structs.Evaluation{ @@ -1520,7 +1520,7 @@ func TestSystemSched_ExistingAllocNoNodes(t *testing.T) { // Create a new job version, deploy job2 := job.Copy() job2.Meta["version"] = "2" - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) // Run evaluation as a plan eval3 := &structs.Evaluation{ @@ -1572,7 +1572,7 @@ func TestSystemSched_ConstraintErrors(t *testing.T) { Operand: "=", }) - require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.Nil(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Evaluate the job eval := &structs.Evaluation{ @@ -1627,7 +1627,7 @@ func TestSystemSched_ChainedAlloc(t *testing.T) { // Create a job job := mock.SystemJob() - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1658,7 +1658,7 @@ func 
TestSystemSched_ChainedAlloc(t *testing.T) { job1.ID = job.ID job1.TaskGroups[0].Tasks[0].Env = make(map[string]string) job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar" - require.NoError(t, h1.State.UpsertJob(structs.MsgTypeTestSetup, h1.NextIndex(), job1)) + require.NoError(t, h1.State.UpsertJob(structs.MsgTypeTestSetup, h1.NextIndex(), nil, job1)) // Insert two more nodes for i := 0; i < 2; i++ { @@ -1737,7 +1737,7 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) { tg2.Name = "web2" tg2.Constraints[0].RTarget = "blue" job.TaskGroups = append(job.TaskGroups, tg2) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create an allocation on each node alloc := mock.Alloc() @@ -1819,7 +1819,7 @@ func TestSystemSched_QueuedAllocsMultTG(t *testing.T) { tg2.Name = "web2" tg2.Constraints[0].RTarget = "blue" job.TaskGroups = append(job.TaskGroups, tg2) - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1935,7 +1935,7 @@ func TestSystemSched_Preemption(t *testing.T) { }, Shared: structs.AllocatedSharedResources{DiskMB: 5 * 1024}, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job1)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1)) job2 := mock.BatchJob() job2.Type = structs.JobTypeBatch @@ -1966,7 +1966,7 @@ func TestSystemSched_Preemption(t *testing.T) { }, Shared: structs.AllocatedSharedResources{DiskMB: 5 * 1024}, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2)) job3 := mock.Job() job3.Type = structs.JobTypeBatch @@ -2043,7 +2043,7 @@ func TestSystemSched_Preemption(t *testing.T) { DiskMB: 2 * 1024, }, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job4)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job4)) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc4})) // Create a system job such that it would need to preempt both allocs to succeed @@ -2056,7 +2056,7 @@ func TestSystemSched_Preemption(t *testing.T) { DynamicPorts: []structs.Port{{Label: "http"}}, }}, } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -2897,7 +2897,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { job.Datacenters = []string{"not-targeted"} } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) alloc.Job = job.Copy() alloc.JobID = job.ID @@ -2920,7 +2920,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { if tc.jobType == structs.JobTypeSysBatch { alloc.Job.TaskGroups[0].Tasks[0].Driver = "raw_exec" } - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) } if tc.previousTerminal { @@ -3059,7 
+3059,7 @@ func TestSystemSched_CSITopology(t *testing.T) { }, } - must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job)) + must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ diff --git a/scheduler/spread_test.go b/scheduler/spread_test.go index c1dd936ea..7c36e033f 100644 --- a/scheduler/spread_test.go +++ b/scheduler/spread_test.go @@ -749,7 +749,7 @@ func generateJob(jobSize int) *structs.Job { } func upsertJob(h *Harness, job *structs.Job) (*structs.Evaluation, error) { - err := h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job) + err := h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job) if err != nil { return nil, err } @@ -870,7 +870,7 @@ func TestSpreadPanicDowngrade(t *testing.T) { job1.Version = 1 job1.TaskGroups[0].Count = 5 - err := h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job1) + err := h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job1) require.NoError(t, err) allocs := []*structs.Allocation{} @@ -902,7 +902,7 @@ func TestSpreadPanicDowngrade(t *testing.T) { job2 := job1.Copy() job2.Version = 2 job2.Spreads = nil - err = h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2) + err = h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job2) require.NoError(t, err) eval := &structs.Evaluation{ diff --git a/website/content/api-docs/jobs.mdx b/website/content/api-docs/jobs.mdx index bf3619804..cccb80ab9 100644 --- a/website/content/api-docs/jobs.mdx +++ b/website/content/api-docs/jobs.mdx @@ -122,6 +122,10 @@ The table below shows this endpoint's support for - `Job` `(Job: <required>)` - Specifies the JSON definition of the job. +- `Submission` `(JobSubmission: <optional>)` - Specifies the original HCL/HCL2/JSON + definition of the job. This data is useful for reference only; it is not considered + for the actual scheduling of `Job`. + - `EnforceIndex` `(bool: false)` - If set, the job will only be registered if the passed `JobModifyIndex` matches the current job's index. If the index is zero, the register only occurs if the job is new. This paradigm allows check-and-set @@ -235,6 +239,12 @@ The table below shows this endpoint's support for - `Canonicalize` `(bool: false)` - Flag to enable setting any unset fields to their default values. +- `Variables` `(string: "")` - Specifies HCL2 variables to use when parsing + the job, provided in the format of a variables file. + +- `VariableFlags` `(map[string]string: nil)` - Specifies HCL2 variables to use + when parsing the job, provided as `key=value` pairs. + - `HCLv1` `(bool: false)` - Use the legacy v1 HCL parser. ### Sample Payload @@ -547,6 +557,63 @@ $ curl \ - `batch`: Allocations are intended to exit. - `system`: Each client gets an allocation. +## Read Job Submission + +This endpoint reads the original source information for a specific version of a single +job. The data this endpoint returns is only available if it was provided along with the +original job during job registration. Only the most recent 6 job source files are +retained. + +| Method | Path | Produces | +| ------ | ------------------------------ | ------------------ | +| `GET` | `/v1/job/:job_id/submission` | `application/json` | + +### Parameters + +- `:job_id` `(string: <required>)` - Specifies the ID of the job. This is + specified as part of the path. +- `version` `(int: <required>)` - Specifies the version number of the job for which + to retrieve the original source information.
This is specified as a query string + parameter. +- `namespace` `(string: "default")` - Specifies the target namespace. If ACL is + enabled, this value must match a namespace that the token is allowed to access. + This is specified as a query string parameter. + +### Sample Request + +```shell-session +nomad operator api /v1/job/my-job/submission?version=42 +``` + +### Sample Response + +```json +{ + "Format": "hcl2", + "JobIndex": 11, + "JobID": "myjob", + "Namespace": "default", + "Source": "variable \"X\" {\n type = string\n}\n\nvariable \"Y\" {\n type = number\n}\n\nvariable \"Z\" {\n type = bool\n}\n \njob \"myjob\" {\n type = \"sysbatch\"\n \n meta {\n nomad_discard_job_source = false\n }\n\n group \"group\" {\n task \"task\" {\n driver = \"raw_exec\"\n\n config {\n command = \"echo\"\n args = [\"X ${var.X}, Y ${var.Y}, Z ${var.Z}\"]\n }\n\n resources {\n cpu = 10\n memory = 16\n }\n }\n }\n}\n", + "VariableFlags": { + "Z": "true", + "X": "x", + "Y": "2" + }, + "Variables": "", + "Version": 0 +} +``` + +#### Field Reference + +- `JobID`: The ID of the job associated with the original job file. +- `Format`: The file format of the original job file. One of `hcl2`, `hcl1`, or `json`. +- `Source`: The literal content of the original job file. +- `VariableFlags`: The key-value pairs of HCL variables as submitted via `-var` command + line arguments when submitting the job via CLI. +- `Variables`: The content of the variables form when submitting the job via the WebUI. +- `Version`: The version of the job this submission source is associated with. + ## List Job Versions This endpoint reads information about all versions of a job. @@ -762,8 +829,7 @@ $ curl \ https://localhost:4646/v1/job/my-job/versions?diffs=true ``` -``` - +```json { "Diffs": [ { diff --git a/website/content/docs/configuration/server.mdx b/website/content/docs/configuration/server.mdx index 18b01e9ca..3582bcb73 100644 --- a/website/content/docs/configuration/server.mdx +++ b/website/content/docs/configuration/server.mdx @@ -259,6 +259,11 @@ server { - `job_default_priority` `(int: 50)` - Specifies the default priority assigned to a job. A valid value must be between `50` and `job_max_priority`. +- `job_max_source_size` `(string: "1M")` - Specifies the size limit of the associated + job source content when registering a job. Note this is not a limit on the actual + size of a job. If the limit is exceeded, the original source is simply discarded + and no error is returned from the job API. + ### Deprecated Parameters - `retry_join` `(array: [])` - Specifies a list of server addresses to