From 8a0739613abe0d35831bdfa0d5fea38effde232f Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 10:42:11 -0800 Subject: [PATCH 01/56] small env fix --- client/driver/driver_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/client/driver/driver_test.go b/client/driver/driver_test.go index 75db08163..8df6dd19b 100644 --- a/client/driver/driver_test.go +++ b/client/driver/driver_test.go @@ -158,6 +158,7 @@ func setupTaskEnv(t *testing.T, driver string) (*allocdir.TaskDir, map[string]st alloc := mock.Alloc() alloc.Job.TaskGroups[0].Tasks[0] = task alloc.Name = "Bar" + alloc.TaskResources["web"].Networks[0].DynamicPorts[0].Value = 2000 conf := testConfig() allocDir := allocdir.NewAllocDir(testLogger(), filepath.Join(conf.AllocDir, alloc.ID)) taskDir := allocDir.NewTaskDir(task.Name) From 2c48e9396383b4ce9e77134d9bfb71b5d61a6d25 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 11:57:38 -0800 Subject: [PATCH 02/56] More timing fixes --- client/task_runner_test.go | 141 +++++++++++++------------------------ 1 file changed, 49 insertions(+), 92 deletions(-) diff --git a/client/task_runner_test.go b/client/task_runner_test.go index cbe9f08da..b1afea5d7 100644 --- a/client/task_runner_test.go +++ b/client/task_runner_test.go @@ -111,6 +111,28 @@ func testTaskRunnerFromAlloc(t *testing.T, restarts bool, alloc *structs.Allocat return &taskRunnerTestCtx{upd, tr, allocDir} } +// testWaitForTaskToStart waits for the task to or fails the test +func testWaitForTaskToStart(t *testing.T, ctx *taskRunnerTestCtx) { + // Wait for the task to start + testutil.WaitForResult(func() (bool, error) { + if l := len(ctx.upd.events); l < 2 { + return false, fmt.Errorf("Expect two events; got %v", l) + } + + if ctx.upd.events[0].Type != structs.TaskReceived { + return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived) + } + + if ctx.upd.events[1].Type != structs.TaskStarted { + return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskStarted) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %v", err) + }) +} + func TestTaskRunner_SimpleRun(t *testing.T) { ctestutil.ExecCompatible(t) ctx := testTaskRunner(t, false) @@ -195,23 +217,8 @@ func TestTaskRunner_Destroy(t *testing.T) { ctx.tr.task.Config["args"] = []string{"1000"} go ctx.tr.Run() - testutil.WaitForResult(func() (bool, error) { - if l := len(ctx.upd.events); l != 2 { - return false, fmt.Errorf("Expect two events; got %v", l) - } - - if ctx.upd.events[0].Type != structs.TaskReceived { - return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived) - } - - if ctx.upd.events[1].Type != structs.TaskStarted { - return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskStarted) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + // Wait for the task to start + testWaitForTaskToStart(t, ctx) // Make sure we are collecting a few stats time.Sleep(2 * time.Second) @@ -282,7 +289,7 @@ func TestTaskRunner_Update(t *testing.T) { ctx.tr.Update(updateAlloc) - // Wait for ctx.upd.te to take place + // Wait for ctx.update to take place testutil.WaitForResult(func() (bool, error) { if ctx.tr.task == newTask { return false, fmt.Errorf("We copied the pointer! 
This would be very bad") @@ -321,23 +328,7 @@ func TestTaskRunner_SaveRestoreState(t *testing.T) { defer ctx.Cleanup() // Wait for the task to be running and then snapshot the state - testutil.WaitForResult(func() (bool, error) { - if l := len(ctx.upd.events); l != 2 { - return false, fmt.Errorf("Expect two events; got %v", l) - } - - if ctx.upd.events[0].Type != structs.TaskReceived { - return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived) - } - - if ctx.upd.events[1].Type != structs.TaskStarted { - return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskStarted) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + testWaitForTaskToStart(t, ctx) if err := ctx.tr.SaveState(); err != nil { t.Fatalf("err: %v", err) @@ -509,6 +500,7 @@ func TestTaskRunner_Download_Retries(t *testing.T) { } func TestTaskRunner_Validate_UserEnforcement(t *testing.T) { + ctestutil.ExecCompatible(t) ctx := testTaskRunner(t, false) defer ctx.Cleanup() @@ -552,11 +544,25 @@ func TestTaskRunner_RestartTask(t *testing.T) { go ctx.tr.Run() defer ctx.Cleanup() + // Wait for it to start go func() { - time.Sleep(time.Duration(testutil.TestMultiplier()*300) * time.Millisecond) + testWaitForTaskToStart(t, ctx) ctx.tr.Restart("test", "restart") - time.Sleep(time.Duration(testutil.TestMultiplier()*300) * time.Millisecond) - ctx.tr.Kill("test", "restart", false) + + // Wait for it to restart then kill + go func() { + // Wait for the task to start again + testutil.WaitForResult(func() (bool, error) { + if len(ctx.upd.events) != 7 { + t.Fatalf("should have 7 ctx.updates: %#v", ctx.upd.events) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %v", err) + }) + ctx.tr.Kill("test", "restart", false) + }() }() select { @@ -566,7 +572,7 @@ func TestTaskRunner_RestartTask(t *testing.T) { } if len(ctx.upd.events) != 9 { - t.Fatalf("should have 9 ctx.upd.tes: %#v", ctx.upd.events) + t.Fatalf("should have 9 ctx.updates: %#v", ctx.upd.events) } if ctx.upd.state != structs.TaskStateDead { @@ -593,7 +599,6 @@ func TestTaskRunner_RestartTask(t *testing.T) { t.Fatalf("Fifth Event was %v; want %v", ctx.upd.events[4].Type, structs.TaskKilled) } - t.Logf("%+v", ctx.upd.events[5]) if ctx.upd.events[5].Type != structs.TaskRestarting { t.Fatalf("Sixth Event was %v; want %v", ctx.upd.events[5].Type, structs.TaskRestarting) } @@ -625,7 +630,7 @@ func TestTaskRunner_KillTask(t *testing.T) { defer ctx.Cleanup() go func() { - time.Sleep(100 * time.Millisecond) + testWaitForTaskToStart(t, ctx) ctx.tr.Kill("test", "kill", true) }() @@ -680,23 +685,7 @@ func TestTaskRunner_SignalFailure(t *testing.T) { defer ctx.Cleanup() // Wait for the task to start - testutil.WaitForResult(func() (bool, error) { - if l := len(ctx.upd.events); l < 2 { - return false, fmt.Errorf("Expect two events; got %v", l) - } - - if ctx.upd.events[0].Type != structs.TaskReceived { - return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived) - } - - if ctx.upd.events[1].Type != structs.TaskStarted { - return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskStarted) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + testWaitForTaskToStart(t, ctx) if err := ctx.tr.Signal("test", "test", syscall.SIGINT); err == nil { t.Fatalf("Didn't receive error") @@ -1125,23 +1114,7 @@ func TestTaskRunner_VaultManager_Restart(t *testing.T) { go ctx.tr.Run() // Wait 
for the task to start - testutil.WaitForResult(func() (bool, error) { - if l := len(ctx.upd.events); l != 2 { - return false, fmt.Errorf("Expect two events; got %v", l) - } - - if ctx.upd.events[0].Type != structs.TaskReceived { - return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived) - } - - if ctx.upd.events[1].Type != structs.TaskStarted { - return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskStarted) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + testWaitForTaskToStart(t, ctx) // Error the token renewal vc := ctx.tr.vaultClient.(*vaultclient.MockVaultClient) @@ -1213,23 +1186,7 @@ func TestTaskRunner_VaultManager_Signal(t *testing.T) { defer ctx.Cleanup() // Wait for the task to start - testutil.WaitForResult(func() (bool, error) { - if l := len(ctx.upd.events); l != 2 { - return false, fmt.Errorf("Expect two events; got %v", l) - } - - if ctx.upd.events[0].Type != structs.TaskReceived { - return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived) - } - - if ctx.upd.events[1].Type != structs.TaskStarted { - return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskStarted) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + testWaitForTaskToStart(t, ctx) // Error the token renewal vc := ctx.tr.vaultClient.(*vaultclient.MockVaultClient) From f57ba322fa4e6ed19176c968ae9a34d7769f28fe Mon Sep 17 00:00:00 2001 From: taylorchu Date: Wed, 25 Jan 2017 16:19:39 -0800 Subject: [PATCH 03/56] BUGFIX: fix consul verify_ssl merging --- nomad/structs/config/consul.go | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/nomad/structs/config/consul.go b/nomad/structs/config/consul.go index ee47a793a..468b8a084 100644 --- a/nomad/structs/config/consul.go +++ b/nomad/structs/config/consul.go @@ -121,7 +121,7 @@ func (a *ConsulConfig) Merge(b *ConsulConfig) *ConsulConfig { result.EnableSSL = b.EnableSSL } if b.VerifySSL != nil { - result.VerifySSL = b.EnableSSL + result.VerifySSL = b.VerifySSL } if b.CAFile != "" { result.CAFile = b.CAFile @@ -180,19 +180,21 @@ func (c *ConsulConfig) ApiConfig() (*consul.Config, error) { if c.VerifySSL != nil { tlsConfig.InsecureSkipVerify = !*c.VerifySSL } - tlsClientCfg, err := consul.SetupTLSConfig(&tlsConfig) - if err != nil { - return nil, fmt.Errorf("error creating tls client config for consul: %v", err) - } - config.HttpClient.Transport = &http.Transport{ - TLSClientConfig: tlsClientCfg, - } - } - if c.EnableSSL != nil && !*c.VerifySSL { - config.HttpClient.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, + + if tlsConfig.InsecureSkipVerify { + config.HttpClient.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } + } else { + tlsClientCfg, err := consul.SetupTLSConfig(&tlsConfig) + if err != nil { + return nil, fmt.Errorf("error creating tls client config for consul: %v", err) + } + config.HttpClient.Transport = &http.Transport{ + TLSClientConfig: tlsClientCfg, + } } } From c920d3cbb04e88ff549edf0afba78a0f6ae0adfe Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 20:58:24 -0800 Subject: [PATCH 04/56] Fix unreachable function in tests --- client/driver/executor/checks_linux_test.go | 55 +++++++++++++++++++ client/driver/executor/checks_test.go | 46 ---------------- 
client/driver/executor/executor_linux_test.go | 52 ++++++++++++++++++ client/driver/executor/executor_test.go | 52 ------------------ 4 files changed, 107 insertions(+), 98 deletions(-) create mode 100644 client/driver/executor/checks_linux_test.go diff --git a/client/driver/executor/checks_linux_test.go b/client/driver/executor/checks_linux_test.go new file mode 100644 index 000000000..b825dbc26 --- /dev/null +++ b/client/driver/executor/checks_linux_test.go @@ -0,0 +1,55 @@ +package executor + +import ( + "log" + "os" + "strings" + "testing" + + "github.com/hashicorp/nomad/client/testutil" +) + +func TestExecScriptCheckWithIsolation(t *testing.T) { + testutil.ExecCompatible(t) + + execCmd := ExecCommand{Cmd: "/bin/echo", Args: []string{"hello world"}} + ctx, allocDir := testExecutorContextWithChroot(t) + defer allocDir.Destroy() + + execCmd.FSIsolation = true + execCmd.ResourceLimits = true + execCmd.User = dstructs.DefaultUnpriviledgedUser + + executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags)) + + if err := executor.SetContext(ctx); err != nil { + t.Fatalf("Unexpected error") + } + + _, err := executor.LaunchCmd(&execCmd) + if err != nil { + t.Fatalf("error in launching command: %v", err) + } + + check := &ExecScriptCheck{ + id: "foo", + cmd: "/bin/echo", + args: []string{"hello", "world"}, + taskDir: ctx.TaskDir, + FSIsolation: true, + } + + res := check.Run() + expectedOutput := "hello world" + expectedExitCode := 0 + if res.Err != nil { + t.Fatalf("err: %v", res.Err) + } + if strings.TrimSpace(res.Output) != expectedOutput { + t.Fatalf("output expected: %v, actual: %v", expectedOutput, res.Output) + } + + if res.ExitCode != expectedExitCode { + t.Fatalf("exitcode expected: %v, actual: %v", expectedExitCode, res.ExitCode) + } +} diff --git a/client/driver/executor/checks_test.go b/client/driver/executor/checks_test.go index 2a2cbdb3b..3907191f9 100644 --- a/client/driver/executor/checks_test.go +++ b/client/driver/executor/checks_test.go @@ -9,7 +9,6 @@ import ( docker "github.com/fsouza/go-dockerclient" - dstructs "github.com/hashicorp/nomad/client/driver/structs" "github.com/hashicorp/nomad/client/testutil" ) @@ -37,51 +36,6 @@ func TestExecScriptCheckNoIsolation(t *testing.T) { } } -func TestExecScriptCheckWithIsolation(t *testing.T) { - testutil.ExecCompatible(t) - - execCmd := ExecCommand{Cmd: "/bin/echo", Args: []string{"hello world"}} - ctx, allocDir := testExecutorContextWithChroot(t) - defer allocDir.Destroy() - - execCmd.FSIsolation = true - execCmd.ResourceLimits = true - execCmd.User = dstructs.DefaultUnpriviledgedUser - - executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags)) - - if err := executor.SetContext(ctx); err != nil { - t.Fatalf("Unexpected error") - } - - _, err := executor.LaunchCmd(&execCmd) - if err != nil { - t.Fatalf("error in launching command: %v", err) - } - - check := &ExecScriptCheck{ - id: "foo", - cmd: "/bin/echo", - args: []string{"hello", "world"}, - taskDir: ctx.TaskDir, - FSIsolation: true, - } - - res := check.Run() - expectedOutput := "hello world" - expectedExitCode := 0 - if res.Err != nil { - t.Fatalf("err: %v", res.Err) - } - if strings.TrimSpace(res.Output) != expectedOutput { - t.Fatalf("output expected: %v, actual: %v", expectedOutput, res.Output) - } - - if res.ExitCode != expectedExitCode { - t.Fatalf("exitcode expected: %v, actual: %v", expectedExitCode, res.ExitCode) - } -} - func TestDockerScriptCheck(t *testing.T) { if !testutil.DockerIsConnected(t) { return diff --git 
a/client/driver/executor/executor_linux_test.go b/client/driver/executor/executor_linux_test.go index 751de540a..e60aa5758 100644 --- a/client/driver/executor/executor_linux_test.go +++ b/client/driver/executor/executor_linux_test.go @@ -8,6 +8,7 @@ import ( "strconv" "strings" "testing" + "time" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/driver/env" @@ -136,3 +137,54 @@ ld.so.conf.d/` t.Fatalf("Command output incorrectly: want %v; got %v", expected, act) } } + +func TestExecutor_ClientCleanup(t *testing.T) { + testutil.ExecCompatible(t) + + ctx, allocDir := testExecutorContextWithChroot(t) + ctx.Task.LogConfig.MaxFiles = 1 + ctx.Task.LogConfig.MaxFileSizeMB = 300 + defer allocDir.Destroy() + + executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags)) + + if err := executor.SetContext(ctx); err != nil { + t.Fatalf("Unexpected error") + } + + // Need to run a command which will produce continuous output but not + // too quickly to ensure executor.Exit() stops the process. + execCmd := ExecCommand{Cmd: "/bin/bash", Args: []string{"-c", "while true; do /bin/echo X; /bin/sleep 1; done"}} + execCmd.FSIsolation = true + execCmd.ResourceLimits = true + execCmd.User = "nobody" + + ps, err := executor.LaunchCmd(&execCmd) + if err != nil { + t.Fatalf("error in launching command: %v", err) + } + if ps.Pid == 0 { + t.Fatalf("expected process to start and have non zero pid") + } + time.Sleep(500 * time.Millisecond) + if err := executor.Exit(); err != nil { + t.Fatalf("err: %v", err) + } + + file := filepath.Join(ctx.LogDir, "web.stdout.0") + finfo, err := os.Stat(file) + if err != nil { + t.Fatalf("error stating stdout file: %v", err) + } + if finfo.Size() == 0 { + t.Fatal("Nothing in stdout; expected at least one byte.") + } + time.Sleep(2 * time.Second) + finfo1, err := os.Stat(file) + if err != nil { + t.Fatalf("error stating stdout file: %v", err) + } + if finfo.Size() != finfo1.Size() { + t.Fatalf("Expected size: %v, actual: %v", finfo.Size(), finfo1.Size()) + } +} diff --git a/client/driver/executor/executor_test.go b/client/driver/executor/executor_test.go index d9af1eb0c..ae77f058e 100644 --- a/client/driver/executor/executor_test.go +++ b/client/driver/executor/executor_test.go @@ -14,7 +14,6 @@ import ( "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/driver/env" cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" tu "github.com/hashicorp/nomad/testutil" @@ -187,57 +186,6 @@ func TestExecutor_WaitExitSignal(t *testing.T) { } } -func TestExecutor_ClientCleanup(t *testing.T) { - testutil.ExecCompatible(t) - - ctx, allocDir := testExecutorContextWithChroot(t) - ctx.Task.LogConfig.MaxFiles = 1 - ctx.Task.LogConfig.MaxFileSizeMB = 300 - defer allocDir.Destroy() - - executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags)) - - if err := executor.SetContext(ctx); err != nil { - t.Fatalf("Unexpected error") - } - - // Need to run a command which will produce continuous output but not - // too quickly to ensure executor.Exit() stops the process. 
- execCmd := ExecCommand{Cmd: "/bin/bash", Args: []string{"-c", "while true; do /bin/echo X; /bin/sleep 1; done"}} - execCmd.FSIsolation = true - execCmd.ResourceLimits = true - execCmd.User = "nobody" - - ps, err := executor.LaunchCmd(&execCmd) - if err != nil { - t.Fatalf("error in launching command: %v", err) - } - if ps.Pid == 0 { - t.Fatalf("expected process to start and have non zero pid") - } - time.Sleep(500 * time.Millisecond) - if err := executor.Exit(); err != nil { - t.Fatalf("err: %v", err) - } - - file := filepath.Join(ctx.LogDir, "web.stdout.0") - finfo, err := os.Stat(file) - if err != nil { - t.Fatalf("error stating stdout file: %v", err) - } - if finfo.Size() == 0 { - t.Fatal("Nothing in stdout; expected at least one byte.") - } - time.Sleep(2 * time.Second) - finfo1, err := os.Stat(file) - if err != nil { - t.Fatalf("error stating stdout file: %v", err) - } - if finfo.Size() != finfo1.Size() { - t.Fatalf("Expected size: %v, actual: %v", finfo.Size(), finfo1.Size()) - } -} - func TestExecutor_Start_Kill(t *testing.T) { execCmd := ExecCommand{Cmd: "/bin/sleep", Args: []string{"10 && hello world"}} ctx, allocDir := testExecutorContext(t) From 0e8b43767b4d287aa5b71e1847634e5aa53606b0 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 21:27:36 -0800 Subject: [PATCH 05/56] Fix import --- client/driver/executor/checks_linux_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/client/driver/executor/checks_linux_test.go b/client/driver/executor/checks_linux_test.go index b825dbc26..3affd0e08 100644 --- a/client/driver/executor/checks_linux_test.go +++ b/client/driver/executor/checks_linux_test.go @@ -6,6 +6,7 @@ import ( "strings" "testing" + dstructs "github.com/hashicorp/nomad/client/driver/structs" "github.com/hashicorp/nomad/client/testutil" ) From 715e576dfb9fdc1911065d0016d565402629d00b Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 13:48:33 -0800 Subject: [PATCH 06/56] Dispatch command --- command/job_dispatch.go | 6 + .../docs/commands/job-dispatch.html.md.erb | 106 ++++++++++++++++++ .../source/docs/commands/status.html.md.erb | 2 + website/source/layouts/docs.erb | 3 + 4 files changed, 117 insertions(+) create mode 100644 website/source/docs/commands/job-dispatch.html.md.erb diff --git a/command/job_dispatch.go b/command/job_dispatch.go index 39e92a9f7..763260ec8 100644 --- a/command/job_dispatch.go +++ b/command/job_dispatch.go @@ -32,6 +32,12 @@ General Options: Dispatch Options: + -meta = + Meta takes a key/value pair seperated by "=". The metadata key will be + injected into the job's metadata. The flag can be provided more than + once to inject multiple metadata key/value pairs. Arbitrary injection is not + allowed. The parameterized job must allow the key to be injected. + -detach Return immediately instead of entering monitor mode. After job dispatch, the evaluation ID will be printed to the screen, which can be used to diff --git a/website/source/docs/commands/job-dispatch.html.md.erb b/website/source/docs/commands/job-dispatch.html.md.erb new file mode 100644 index 000000000..551aa2d45 --- /dev/null +++ b/website/source/docs/commands/job-dispatch.html.md.erb @@ -0,0 +1,106 @@ +--- +layout: "docs" +page_title: "Commands: job dispatch" +sidebar_current: "docs-commands-job-dispatch" +description: > + The dispatch command is used to create an instance of a parameterized job. 
+--- + +# Command: job dispatch + +~> The [`job dispatch`](/docs/commands/snapshot/agent.html) subcommand described here is + available only in version 0.5.3 and later. The release canidate is + downloadable on the [releases + page.](https://releases.hashicorp.com/nomad/0.5.3-rc1/) + +The `job dispatch` command is used to create new instances of a [parameterized +job](TODO). The parameterized job captures a job's configuration and runtime +requirements in a generic way and `dispatch` is used to provide the input for +the job to run against. One can think of the parameterized job as a function +definition and dispatch is used to invoke the function. + +## Usage + +``` +nomad job dispatch [options] [input source] +``` + +Dispatch creates an instance of a parameterized job. A data payload to the +dispatched instance can be provided via stdin by using "-" for the input source +or by specifiying a path to a file. Metadata can be supplied by using the meta +flag one or more times. + +Upon successfully creation, the dispatched job ID will be printed and the +triggered evaluation will be monitored. This can be disabled by supplying the +detach flag. + +On successful job submission and scheduling, exit code 0 will be returned. If +there are job placement issues encountered (unsatisfiable constraints, resource +exhaustion, etc), then the exit code will be 2. Any other errors, including +client connection issues or internal errors, are indicated by exit code 1. + +## General Options + +<%= partial "docs/commands/_general_options" %> + +## Run Options + +* `-meta`: Meta takes a key/value pair seperated by "=". The metadata key will + be injected into the job's metadata. The flag can be provided more than once + to inject multiple metadata key/value pairs. Arbitrary injection is not + allowed. The parameterized job must allow the key to be injected. + +* `-detach`: Return immediately instead of monitoring. A new evaluation ID + will be output, which can be used to examine the evaluation using the + [eval-status](/docs/commands/eval-status.html) command + +* `-verbose`: Show full information. 
+ +## Examples + +Dispatch against a parameterized job with the ID "video-encode" and +passing in a configuration payload via stdin: + +``` +$ cat << EOF | nomad job dispatch video-encode - +{ + "s3-input": "https://s3-us-west-1.amazonaws.com/video-bucket/cb31dabb1", + "s3-output": "https://s3-us-west-1.amazonaws.com/video-bucket/a149adbe3", + "input-codec": "mp4", + "output-codec": "webm", + "quality": "1080p" +} +EOF +Dispatched Job ID = video-encode/dispatch-1485379325-cb38d00d +Evaluation ID = 31199841 + +==> Monitoring evaluation "31199841" + Evaluation triggered by job "example/dispatch-1485379325-cb38d00d" + Allocation "8254b85f" created: node "82ff9c50", group "cache" + Evaluation status changed: "pending" -> "complete" +==> Evaluation "31199841" finished with status "complete" +``` + +Dispatch against a parameterized job with the ID "video-encode" and +passing in a configuration payload via a file: + +``` +$ nomad job dispatch video-encode video-config.json +Dispatched Job ID = video-encode/dispatch-1485379325-cb38d00d +Evaluation ID = 31199841 + +==> Monitoring evaluation "31199841" + Evaluation triggered by job "example/dispatch-1485379325-cb38d00d" + Allocation "8254b85f" created: node "82ff9c50", group "cache" + Evaluation status changed: "pending" -> "complete" +==> Evaluation "31199841" finished with status "complete" +``` + +Dispatch against a parameterized job with the ID "video-encode" using the detach +flag: + +``` +$ nomad job dispatch -detach video-encode video-config.json +Dispatched Job ID = example/dispatch-1485380684-c37b3dba +Evaluation ID = d9034c4e +``` diff --git a/website/source/docs/commands/status.html.md.erb b/website/source/docs/commands/status.html.md.erb index 80e3e046d..0fb643ba0 100644 --- a/website/source/docs/commands/status.html.md.erb +++ b/website/source/docs/commands/status.html.md.erb @@ -152,3 +152,5 @@ b82f58b6 8bf94335 8d0331e9 cache run running 08/08/16 21:03:19 CD ed3665f5 8bf94335 8d0331e9 cache run running 08/08/16 21:03:19 CDT 24cfd201 8bf94335 8d0331e9 cache run running 08/08/16 21:03:19 CDT ``` + +TODO: CRON AND DISPATCH diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 0fb9bd356..dad25d2b3 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -204,6 +204,9 @@ > inspect + > + job dispatch + > keygen From 78bfc6a4af0d1946884a3101e0ec39ffd4d437a7 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 19:14:58 -0800 Subject: [PATCH 07/56] Parameterized page --- .../docs/commands/job-dispatch.html.md.erb | 8 +- .../job-specification/parameterized.html.md | 159 ++++++++++++++++++ .../docs/job-specification/periodic.html.md | 5 + 3 files changed, 171 insertions(+), 1 deletion(-) create mode 100644 website/source/docs/job-specification/parameterized.html.md diff --git a/website/source/docs/commands/job-dispatch.html.md.erb b/website/source/docs/commands/job-dispatch.html.md.erb index 551aa2d45..cd5b24ffb 100644 --- a/website/source/docs/commands/job-dispatch.html.md.erb +++ b/website/source/docs/commands/job-dispatch.html.md.erb @@ -14,11 +14,15 @@ description: > page.](https://releases.hashicorp.com/nomad/0.5.3-rc1/) The `job dispatch` command is used to create new instances of a [parameterized -job](TODO). The parameterized job captures a job's configuration and runtime +job]. The parameterized job captures a job's configuration and runtime requirements in a generic way and `dispatch` is used to provide the input for the job to run against. 
One can think of the parameterized job as a function definition and dispatch is used to invoke the function. +Each time a job is dispatched, a unique job ID is generated. This allows a +caller to track the status of the job, much like a future or promise in some +programming languages. + ## Usage ``` @@ -104,3 +108,5 @@ $ nomad job dispatch -detach video-encode video-config.json Dispatched Job ID = example/dispatch-1485380684-c37b3dba Evaluation ID = d9034c4e ``` + +[parameterized job]: /docs/job-specification/parameterized.html "Nomad parameterized Job Specification" diff --git a/website/source/docs/job-specification/parameterized.html.md b/website/source/docs/job-specification/parameterized.html.md new file mode 100644 index 000000000..72ea3e297 --- /dev/null +++ b/website/source/docs/job-specification/parameterized.html.md @@ -0,0 +1,159 @@ +--- +layout: "docs" +page_title: "parameterized Stanza - Job Specification" +sidebar_current: "docs-job-specification-parameterized" +description: |- + A parameterized job is used to encapsulate a set of work that can be carried + out on various inputs much like a function definition. When the + `parameterized` stanza is added to a job, the job acts as a function to the + cluster as a whole. +--- + +# `parameterized` Stanza + + + + + + +
Placement + job -> **parameterized** +
+ +A parameterized job is used to encapsulate a set of work that can be carried out +on various inputs much like a function definition. When the `parameterized` +stanza is added to a job, the job acts as a function to the cluster as a whole. + +The `parameterized` stanza allows job operators to configure a job that carries +out a particular action, define its resource requirements and configure how +inputs and configuration are retreived by the tasks within the job. + +To invoke a parameterized job, [`nomad job +dispatch`][dispatch command] or the equivalent HTTP APIs are +used. When dispatching against a parameterized job, an opaque payload and +metadata may be injected into the job. These inputs to the parameterized job act +like arguments to a function. The job consumes them to change it's behavior, +without exposing the implementation details to the caller. + +To that end, tasks within the job can add a [`dispatch_input`][dispatch_input] stanza that +defines where on the filesystem this payload gets written to. An example payload +would be a task's JSON configuration. + +Further, certain metadata may be marked as required when dispatching a job so it +can be used to inject configuration directly into a task's arguments using +[interpolation]. An example of this would be to require a run ID key that +could be used to lookup the work the job is suppose to do from a management +service or database. + +Each time a job is dispatched, a unique job ID is generated. This allows a +caller to track the status of the job, much like a future or promise in some +programming languages. + +```hcl +job "docs" { + parameterized { + payload = "required" + required_meta = ["dispatcher_email"] + optional_meta = ["pager_email"] + } +} +``` + +## `parameterized` Requirements + + - The job's [scheduler type][batch-type] must be `batch`. + +## `parameterized` Parameters + +- `payload` `(string: "optional")` - Specifies the requirement of providing a + payload when dispatching against the parameterized job. The options for this + field are: + + - `"optional"` - A payload is optional when dispatching against the job. + + - `"required"` - A payload must be provided when dispatching against the job. + + - `"forbidden"` - A payload is forbidden when dispatching against the job. + +- `required_meta` `([]string: nil)` - Specifies the set of metadata keys that + must be provided when dispatching against the job. + +- `optional_meta` `([]string: nil)` - Specifies the set of metadata keys that + may be provided when dispatching against the job. + +## `parameterized` Examples + +The following examples show non-runnable example parameterized jobs: + +### Required Inputs + +This example shows a parameterized job that requires both a payload and +metadata: + +```hcl +job "video-encode" { + ... + type = "batch" + + parameterized { + payload = "required" + required_meta = ["dispatcher_email"] + } + + group "encode" { + ... + + task "ffmpeg" { + driver = "exec" + + config { + command = "ffmpeg-wrapper" + + # When dispatched, the payload is written to a file that is then read by + # the created task upon startup + args = ["-config=${NOMAD_TASK_DIR}/config.json"] + } + + dispatch_input { + file = "config.json" + } + } + } +} +``` + +### Metadata Interpolation + +```hcl +job "email-blast" { + ... + type = "batch" + + parameterized { + payload = "forbidden" + required_meta = ["CAMPAIGN_ID"] + } + + group "emails" { + ... 
+ + task "emailer" { + driver = "exec" + + config { + command = "emailer" + + # The campagain ID is interpolated and injected into the task's + # arguments + args = ["-campaign=${NOMAD_META_CAMPAIGN_ID}"] + } + } + } +} +``` + +[batch-type]: /docs/job-specification/job.html#type "Batch scheduler type" +[dispatch command]: /docs/commands/job-dispatch.html "Nomad Job Dispatch Command" +[resources]: /docs/job-specification/resources.html "Nomad resources Job Specification" +[interpolation]: /docs/runtime/interpolation.html "Nomad Runtime Interpolation" +[dispatch_input]: /docs/job-specification/dispatch-input.html "Nomad dispatch_input Job Specification" diff --git a/website/source/docs/job-specification/periodic.html.md b/website/source/docs/job-specification/periodic.html.md index 8b010c149..4f480448f 100644 --- a/website/source/docs/job-specification/periodic.html.md +++ b/website/source/docs/job-specification/periodic.html.md @@ -35,6 +35,10 @@ job "docs" { The periodic expression is always evaluated in the **UTC timezone** to ensure consistent evaluation when Nomad spans multiple time zones. +## `periodic` Requirements + + - The job's [scheduler type][batch-type] must be `batch`. + ## `periodic` Parameters - `cron` `(string: )` - Specifies a cron expression configuring the @@ -60,4 +64,5 @@ periodic { } ``` +[batch-type]: /docs/job-specification/job.html#type "Batch scheduler type" [cron]: https://github.com/gorhill/cronexpr#implementation "List of cron expressions" From b456e7fff1a2f8a9ac9b2449ae9db93b78c0d92c Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 20:49:06 -0800 Subject: [PATCH 08/56] dispatch_payload --- .../dispatch-payload.html.md | 62 +++++++++++++++++++ .../job-specification/parameterized.html.md | 7 ++- website/source/layouts/docs.erb | 6 ++ 3 files changed, 72 insertions(+), 3 deletions(-) create mode 100644 website/source/docs/job-specification/dispatch-payload.html.md diff --git a/website/source/docs/job-specification/dispatch-payload.html.md b/website/source/docs/job-specification/dispatch-payload.html.md new file mode 100644 index 000000000..70fed94d6 --- /dev/null +++ b/website/source/docs/job-specification/dispatch-payload.html.md @@ -0,0 +1,62 @@ +--- +layout: "docs" +page_title: "dispatch_payload Stanza - Job Specification" +sidebar_current: "docs-job-specification-dispatch-payload" +description: |- + The "dispatch_payload" stanza allows a task to access dispatch payloads. + to +--- + +# `dispatch_payload` Stanza + + + + + + +
Placement + job -> group -> task -> **dispatch_payload** +
+ +The `dispatch_payload` stanza is used in conjuction with a [`paramterized`][parameterized] job +that expects a payload. When the job is dispatched with a payload, the payload +will be made available to any task that has a `dispatch_payload` stanza. The +payload will be written to the configured file before the task is started. This +allows the task to use the payload as input or configuration. + +```hcl +job "docs" { + group "example" { + task "server" { + dispatch_payload { + file = "config.json" + } + } + } +} +``` + +## `dispatch_payload` Parameters + +- `file` `(string: "")` - Specifies the file name to write the content of + dispatch payload to. The file is written to a [task's local + directory][localdir]. + +## `dispatch_payload` Examples + +The following examples only show the `dispatch_payload` stanzas. Remember that the +`dispatch_payload` stanza is only valid in the placements listed above. + +### Write Payload to a File + +This example shows a `dispatch_payload` block in a parameterized job that writes +the payload to a `config.json` file. + +```hcl +dispatch_payload { + file = "config.json" +} +``` + +[localdir]: /docs/runtime/environment.html#local_ "Task Local Directory" +[parameterized]: /docs/job-specification/parameterized.html "Nomad parameterized Job Specification" diff --git a/website/source/docs/job-specification/parameterized.html.md b/website/source/docs/job-specification/parameterized.html.md index 72ea3e297..ee33867ce 100644 --- a/website/source/docs/job-specification/parameterized.html.md +++ b/website/source/docs/job-specification/parameterized.html.md @@ -35,7 +35,8 @@ metadata may be injected into the job. These inputs to the parameterized job act like arguments to a function. The job consumes them to change it's behavior, without exposing the implementation details to the caller. -To that end, tasks within the job can add a [`dispatch_input`][dispatch_input] stanza that +To that end, tasks within the job can add a +[`dispatch_payload`][dispatch_payload] stanza that defines where on the filesystem this payload gets written to. An example payload would be a task's JSON configuration. 
@@ -114,7 +115,7 @@ job "video-encode" { args = ["-config=${NOMAD_TASK_DIR}/config.json"] } - dispatch_input { + dispatch_payload { file = "config.json" } } @@ -156,4 +157,4 @@ job "email-blast" { [dispatch command]: /docs/commands/job-dispatch.html "Nomad Job Dispatch Command" [resources]: /docs/job-specification/resources.html "Nomad resources Job Specification" [interpolation]: /docs/runtime/interpolation.html "Nomad Runtime Interpolation" -[dispatch_input]: /docs/job-specification/dispatch-input.html "Nomad dispatch_input Job Specification" +[dispatch_payload]: /docs/job-specification/dispatch-payload.html "Nomad dispatch_payload Job Specification" diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index dad25d2b3..f3b00a5fa 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -48,6 +48,9 @@ > constraint + > + dispatch_payload + > env @@ -69,6 +72,9 @@ > network + > + parameterized + > periodic From 8f8215c09365e07ce6278a249b9a168b7b328444 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 20:51:00 -0800 Subject: [PATCH 09/56] Fix bad link --- website/source/docs/commands/job-dispatch.html.md.erb | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/website/source/docs/commands/job-dispatch.html.md.erb b/website/source/docs/commands/job-dispatch.html.md.erb index cd5b24ffb..cce195491 100644 --- a/website/source/docs/commands/job-dispatch.html.md.erb +++ b/website/source/docs/commands/job-dispatch.html.md.erb @@ -8,10 +8,9 @@ description: > # Command: job dispatch -~> The [`job dispatch`](/docs/commands/snapshot/agent.html) subcommand described here is - available only in version 0.5.3 and later. The release canidate is - downloadable on the [releases - page.](https://releases.hashicorp.com/nomad/0.5.3-rc1/) +~> The `job dispatch` subcommand described here is available only in version +0.5.3 and later. The release canidate is downloadable on the [releases +page.](https://releases.hashicorp.com/nomad/0.5.3-rc1/) The `job dispatch` command is used to create new instances of a [parameterized job]. The parameterized job captures a job's configuration and runtime From 314435261dc609016507caaed64e36d8937c277e Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 21:06:16 -0800 Subject: [PATCH 10/56] Rename dispatch_input to dispatch_payload --- api/tasks.go | 34 ++++++++++----------- client/task_runner.go | 4 +-- client/task_runner_test.go | 2 +- jobspec/parse.go | 16 +++++----- jobspec/parse_test.go | 2 +- jobspec/test-fixtures/parameterized_job.hcl | 2 +- nomad/structs/diff.go | 4 +-- nomad/structs/diff_test.go | 28 ++++++++--------- nomad/structs/structs.go | 24 +++++++-------- nomad/structs/structs_test.go | 4 +-- 10 files changed, 60 insertions(+), 60 deletions(-) diff --git a/api/tasks.go b/api/tasks.go index 7ba6ea0dd..b6b184072 100644 --- a/api/tasks.go +++ b/api/tasks.go @@ -141,28 +141,28 @@ type LogConfig struct { MaxFileSizeMB int } -// DispatchInputConfig configures how a task gets its input from a job dispatch -type DispatchInputConfig struct { +// DispatchPayloadConfig configures how a task gets its input from a job dispatch +type DispatchPayloadConfig struct { File string } // Task is a single process in a task group. 
type Task struct { - Name string - Driver string - User string - Config map[string]interface{} - Constraints []*Constraint - Env map[string]string - Services []Service - Resources *Resources - Meta map[string]string - KillTimeout time.Duration - LogConfig *LogConfig - Artifacts []*TaskArtifact - Vault *Vault - Templates []*Template - DispatchInput *DispatchInputConfig + Name string + Driver string + User string + Config map[string]interface{} + Constraints []*Constraint + Env map[string]string + Services []Service + Resources *Resources + Meta map[string]string + KillTimeout time.Duration + LogConfig *LogConfig + Artifacts []*TaskArtifact + Vault *Vault + Templates []*Template + DispatchPayload *DispatchPayloadConfig } // TaskArtifact is used to download artifacts before running a task. diff --git a/client/task_runner.go b/client/task_runner.go index 61ca21ccc..e872e434d 100644 --- a/client/task_runner.go +++ b/client/task_runner.go @@ -753,9 +753,9 @@ func (r *TaskRunner) prestart(resultCh chan bool) { // If the job is a dispatch job and there is a payload write it to disk requirePayload := len(r.alloc.Job.Payload) != 0 && - (r.task.DispatchInput != nil && r.task.DispatchInput.File != "") + (r.task.DispatchPayload != nil && r.task.DispatchPayload.File != "") if !r.payloadRendered && requirePayload { - renderTo := filepath.Join(r.taskDir.LocalDir, r.task.DispatchInput.File) + renderTo := filepath.Join(r.taskDir.LocalDir, r.task.DispatchPayload.File) decoded, err := snappy.Decode(nil, r.alloc.Job.Payload) if err != nil { r.setState( diff --git a/client/task_runner_test.go b/client/task_runner_test.go index b1afea5d7..e7e2b19db 100644 --- a/client/task_runner_test.go +++ b/client/task_runner_test.go @@ -1232,7 +1232,7 @@ func TestTaskRunner_SimpleRun_Dispatch(t *testing.T) { "run_for": "1s", } fileName := "test" - task.DispatchInput = &structs.DispatchInputConfig{ + task.DispatchPayload = &structs.DispatchPayloadConfig{ File: fileName, } alloc.Job.ParameterizedJob = &structs.ParameterizedJobConfig{} diff --git a/jobspec/parse.go b/jobspec/parse.go index a4fc88e15..7c4215698 100644 --- a/jobspec/parse.go +++ b/jobspec/parse.go @@ -564,7 +564,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l "artifact", "config", "constraint", - "dispatch_input", + "dispatch_payload", "driver", "env", "kill_timeout", @@ -587,7 +587,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l delete(m, "artifact") delete(m, "config") delete(m, "constraint") - delete(m, "dispatch_input") + delete(m, "dispatch_payload") delete(m, "env") delete(m, "logs") delete(m, "meta") @@ -747,10 +747,10 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l t.Vault = v } - // If we have a dispatch_input block parse that - if o := listVal.Filter("dispatch_input"); len(o.Items) > 0 { + // If we have a dispatch_payload block parse that + if o := listVal.Filter("dispatch_payload"); len(o.Items) > 0 { if len(o.Items) > 1 { - return fmt.Errorf("only one dispatch_input block is allowed in a task. Number of dispatch_input blocks found: %d", len(o.Items)) + return fmt.Errorf("only one dispatch_payload block is allowed in a task. 
Number of dispatch_payload blocks found: %d", len(o.Items)) } var m map[string]interface{} dispatchBlock := o.Items[0] @@ -760,15 +760,15 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l "file", } if err := checkHCLKeys(dispatchBlock.Val, valid); err != nil { - return multierror.Prefix(err, fmt.Sprintf("'%s', dispatch_input ->", n)) + return multierror.Prefix(err, fmt.Sprintf("'%s', dispatch_payload ->", n)) } if err := hcl.DecodeObject(&m, dispatchBlock.Val); err != nil { return err } - t.DispatchInput = &structs.DispatchInputConfig{} - if err := mapstructure.WeakDecode(m, t.DispatchInput); err != nil { + t.DispatchPayload = &structs.DispatchPayloadConfig{} + if err := mapstructure.WeakDecode(m, t.DispatchPayload); err != nil { return err } } diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go index e9c6c8944..7bd404a55 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -571,7 +571,7 @@ func TestParse(t *testing.T) { MaxFiles: 10, MaxFileSizeMB: 10, }, - DispatchInput: &structs.DispatchInputConfig{ + DispatchPayload: &structs.DispatchPayloadConfig{ File: "foo/bar", }, }, diff --git a/jobspec/test-fixtures/parameterized_job.hcl b/jobspec/test-fixtures/parameterized_job.hcl index 1b95a7de2..1f31e583d 100644 --- a/jobspec/test-fixtures/parameterized_job.hcl +++ b/jobspec/test-fixtures/parameterized_job.hcl @@ -11,7 +11,7 @@ job "parameterized_job" { driver = "docker" resources {} - dispatch_input { + dispatch_payload { file = "foo/bar" } } diff --git a/nomad/structs/diff.go b/nomad/structs/diff.go index 9fca44c8b..82dc8ea89 100644 --- a/nomad/structs/diff.go +++ b/nomad/structs/diff.go @@ -375,8 +375,8 @@ func (t *Task) Diff(other *Task, contextual bool) (*TaskDiff, error) { diff.Objects = append(diff.Objects, lDiff) } - // Dispatch Input diff - dDiff := primitiveObjectDiff(t.DispatchInput, other.DispatchInput, nil, "DispatchInput", contextual) + // Dispatch payload diff + dDiff := primitiveObjectDiff(t.DispatchPayload, other.DispatchPayload, nil, "DispatchPayload", contextual) if dDiff != nil { diff.Objects = append(diff.Objects, dDiff) } diff --git a/nomad/structs/diff_test.go b/nomad/structs/diff_test.go index 5cd258a55..0fa6e18dc 100644 --- a/nomad/structs/diff_test.go +++ b/nomad/structs/diff_test.go @@ -3666,10 +3666,10 @@ func TestTaskDiff(t *testing.T) { }, }, { - // DispatchInput added + // DispatchPayload added Old: &Task{}, New: &Task{ - DispatchInput: &DispatchInputConfig{ + DispatchPayload: &DispatchPayloadConfig{ File: "foo", }, }, @@ -3678,7 +3678,7 @@ func TestTaskDiff(t *testing.T) { Objects: []*ObjectDiff{ { Type: DiffTypeAdded, - Name: "DispatchInput", + Name: "DispatchPayload", Fields: []*FieldDiff{ { Type: DiffTypeAdded, @@ -3692,9 +3692,9 @@ func TestTaskDiff(t *testing.T) { }, }, { - // DispatchInput deleted + // DispatchPayload deleted Old: &Task{ - DispatchInput: &DispatchInputConfig{ + DispatchPayload: &DispatchPayloadConfig{ File: "foo", }, }, @@ -3704,7 +3704,7 @@ func TestTaskDiff(t *testing.T) { Objects: []*ObjectDiff{ { Type: DiffTypeDeleted, - Name: "DispatchInput", + Name: "DispatchPayload", Fields: []*FieldDiff{ { Type: DiffTypeDeleted, @@ -3718,14 +3718,14 @@ func TestTaskDiff(t *testing.T) { }, }, { - // Dispatch input edited + // Dispatch payload edited Old: &Task{ - DispatchInput: &DispatchInputConfig{ + DispatchPayload: &DispatchPayloadConfig{ File: "foo", }, }, New: &Task{ - DispatchInput: &DispatchInputConfig{ + DispatchPayload: &DispatchPayloadConfig{ File: "bar", }, }, @@ -3734,7 +3734,7 @@ 
func TestTaskDiff(t *testing.T) { Objects: []*ObjectDiff{ { Type: DiffTypeEdited, - Name: "DispatchInput", + Name: "DispatchPayload", Fields: []*FieldDiff{ { Type: DiffTypeEdited, @@ -3748,16 +3748,16 @@ func TestTaskDiff(t *testing.T) { }, }, { - // DispatchInput edited with context. Place holder for if more + // DispatchPayload edited with context. Place holder for if more // fields are added Contextual: true, Old: &Task{ - DispatchInput: &DispatchInputConfig{ + DispatchPayload: &DispatchPayloadConfig{ File: "foo", }, }, New: &Task{ - DispatchInput: &DispatchInputConfig{ + DispatchPayload: &DispatchPayloadConfig{ File: "bar", }, }, @@ -3766,7 +3766,7 @@ func TestTaskDiff(t *testing.T) { Objects: []*ObjectDiff{ { Type: DiffTypeEdited, - Name: "DispatchInput", + Name: "DispatchPayload", Fields: []*FieldDiff{ { Type: DiffTypeEdited, diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index ceb7df38a..71c437d65 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -1694,22 +1694,22 @@ func DispatchedID(templateID string, t time.Time) string { return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), u) } -// DispatchInputConfig configures how a task gets its input from a job dispatch -type DispatchInputConfig struct { +// DispatchPayloadConfig configures how a task gets its input from a job dispatch +type DispatchPayloadConfig struct { // File specifies a relative path to where the input data should be written File string } -func (d *DispatchInputConfig) Copy() *DispatchInputConfig { +func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig { if d == nil { return nil } - nd := new(DispatchInputConfig) + nd := new(DispatchPayloadConfig) *nd = *d return nd } -func (d *DispatchInputConfig) Validate() error { +func (d *DispatchPayloadConfig) Validate() error { // Verify the destination doesn't escape escaped, err := PathEscapesAllocDir("task/local/", d.File) if err != nil { @@ -2272,8 +2272,8 @@ type Task struct { // Resources is the resources needed by this task Resources *Resources - // DispatchInput configures how the task retrieves its input from a dispatch - DispatchInput *DispatchInputConfig + // DispatchPayload configures how the task retrieves its input from a dispatch + DispatchPayload *DispatchPayloadConfig // Meta is used to associate arbitrary metadata with this // task. This is opaque to Nomad. 
@@ -2312,7 +2312,7 @@ func (t *Task) Copy() *Task { nt.Vault = nt.Vault.Copy() nt.Resources = nt.Resources.Copy() nt.Meta = helper.CopyMapStringString(nt.Meta) - nt.DispatchInput = nt.DispatchInput.Copy() + nt.DispatchPayload = nt.DispatchPayload.Copy() if t.Artifacts != nil { artifacts := make([]*TaskArtifact, 0, len(t.Artifacts)) @@ -2477,10 +2477,10 @@ func (t *Task) Validate(ephemeralDisk *EphemeralDisk) error { } } - // Validate the dispatch input block if there - if t.DispatchInput != nil { - if err := t.DispatchInput.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Input validation failed: %v", err)) + // Validate the dispatch payload block if there + if t.DispatchPayload != nil { + if err := t.DispatchPayload.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err)) } } diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index 72c097224..24bbc83d1 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -1518,8 +1518,8 @@ func TestParameterizedJobConfig_Canonicalize(t *testing.T) { } } -func TestDispatchInputConfig_Validate(t *testing.T) { - d := &DispatchInputConfig{ +func TestDispatchPayloadConfig_Validate(t *testing.T) { + d := &DispatchPayloadConfig{ File: "foo", } From 98c17e519571aade164f3fd0a35143c135baf3fe Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 21:13:18 -0800 Subject: [PATCH 11/56] Rename meta/parsing --- jobspec/parse.go | 36 ++----------------- jobspec/test-fixtures/parameterized_job.hcl | 6 ++-- nomad/structs/structs.go | 4 +-- .../job-specification/parameterized.html.md | 20 +++++------ 4 files changed, 16 insertions(+), 50 deletions(-) diff --git a/jobspec/parse.go b/jobspec/parse.go index 7c4215698..1bb6c2bd3 100644 --- a/jobspec/parse.go +++ b/jobspec/parse.go @@ -1259,12 +1259,11 @@ func parseParameterizedJob(result **structs.ParameterizedJobConfig, list *ast.Ob return err } - delete(m, "meta") - // Check for invalid keys valid := []string{ "payload", - "meta_keys", + "meta_required", + "meta_optional", } if err := checkHCLKeys(o.Val, valid); err != nil { return err @@ -1276,37 +1275,6 @@ func parseParameterizedJob(result **structs.ParameterizedJobConfig, list *ast.Ob return err } - var listVal *ast.ObjectList - if ot, ok := o.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return fmt.Errorf("parameterized block should be an object") - } - - // Parse the meta block - if metaList := listVal.Filter("meta_keys"); len(metaList.Items) > 0 { - // Get our resource object - o := metaList.Items[0] - - var m map[string]interface{} - if err := hcl.DecodeObject(&m, o.Val); err != nil { - return err - } - - // Check for invalid keys - valid := []string{ - "optional", - "required", - } - if err := checkHCLKeys(o.Val, valid); err != nil { - return err - } - - if err := mapstructure.WeakDecode(m, &d); err != nil { - return err - } - } - *result = &d return nil } diff --git a/jobspec/test-fixtures/parameterized_job.hcl b/jobspec/test-fixtures/parameterized_job.hcl index 1f31e583d..a8c5bfe5c 100644 --- a/jobspec/test-fixtures/parameterized_job.hcl +++ b/jobspec/test-fixtures/parameterized_job.hcl @@ -1,10 +1,8 @@ job "parameterized_job" { parameterized { payload = "required" - meta_keys { - required = ["foo", "bar"] - optional = ["baz", "bam"] - } + meta_required = ["foo", "bar"] + meta_optional = ["baz", "bam"] } group "foo" { task "bar" { diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go 
index 71c437d65..582d0f3ba 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -1647,10 +1647,10 @@ type ParameterizedJobConfig struct { Payload string // MetaRequired is metadata keys that must be specified by the dispatcher - MetaRequired []string `mapstructure:"required"` + MetaRequired []string `mapstructure:"meta_required"` // MetaOptional is metadata keys that may be specified by the dispatcher - MetaOptional []string `mapstructure:"optional"` + MetaOptional []string `mapstructure:"meta_optional"` } func (d *ParameterizedJobConfig) Validate() error { diff --git a/website/source/docs/job-specification/parameterized.html.md b/website/source/docs/job-specification/parameterized.html.md index ee33867ce..59e0cfdb6 100644 --- a/website/source/docs/job-specification/parameterized.html.md +++ b/website/source/docs/job-specification/parameterized.html.md @@ -54,8 +54,8 @@ programming languages. job "docs" { parameterized { payload = "required" - required_meta = ["dispatcher_email"] - optional_meta = ["pager_email"] + meta_required = ["dispatcher_email"] + meta_optional = ["pager_email"] } } ``` @@ -66,6 +66,12 @@ job "docs" { ## `parameterized` Parameters +- `meta_optional` `([]string: nil)` - Specifies the set of metadata keys that + may be provided when dispatching against the job. + +- `meta_required` `([]string: nil)` - Specifies the set of metadata keys that + must be provided when dispatching against the job. + - `payload` `(string: "optional")` - Specifies the requirement of providing a payload when dispatching against the parameterized job. The options for this field are: @@ -76,12 +82,6 @@ job "docs" { - `"forbidden"` - A payload is forbidden when dispatching against the job. -- `required_meta` `([]string: nil)` - Specifies the set of metadata keys that - must be provided when dispatching against the job. - -- `optional_meta` `([]string: nil)` - Specifies the set of metadata keys that - may be provided when dispatching against the job. - ## `parameterized` Examples The following examples show non-runnable example parameterized jobs: @@ -98,7 +98,7 @@ job "video-encode" { parameterized { payload = "required" - required_meta = ["dispatcher_email"] + meta_required = ["dispatcher_email"] } group "encode" { @@ -132,7 +132,7 @@ job "email-blast" { parameterized { payload = "forbidden" - required_meta = ["CAMPAIGN_ID"] + meta_required = ["CAMPAIGN_ID"] } group "emails" { From f49912cbabf44a298d129c8aad816ea55d205a9b Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 21:16:18 -0800 Subject: [PATCH 12/56] alphabetical --- .../docs/job-specification/artifact.html.md | 8 +++---- .../docs/job-specification/template.html.md | 22 +++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/website/source/docs/job-specification/artifact.html.md b/website/source/docs/job-specification/artifact.html.md index e773241d2..bcd4ef394 100644 --- a/website/source/docs/job-specification/artifact.html.md +++ b/website/source/docs/job-specification/artifact.html.md @@ -51,15 +51,15 @@ before the starting the task. default value is to place the binary in `local/`. The destination is treated as a directory and source files will be downloaded into that directory path. -- `source` `(string: )` - Specifies the URL of the artifact to download. - Only `http`, `https`, and `s3` URLs are supported. See [`go-getter`][go-getter] - for details. - - `options` `(map: nil)` - Specifies configuration parameters to fetch the artifact. 
The key-value pairs map directly to parameters appended to the supplied `source` URL. Please see the [`go-getter` documentation][go-getter] for a complete list of options and examples +- `source` `(string: )` - Specifies the URL of the artifact to download. + Only `http`, `https`, and `s3` URLs are supported. See [`go-getter`][go-getter] + for details. + ## `artifact` Examples The following examples only show the `artifact` stanzas. Remember that the diff --git a/website/source/docs/job-specification/template.html.md b/website/source/docs/job-specification/template.html.md index 319e2c0cd..d1d674492 100644 --- a/website/source/docs/job-specification/template.html.md +++ b/website/source/docs/job-specification/template.html.md @@ -47,19 +47,17 @@ README][ct]. ## `template` Parameters -- `source` `(string: "")` - Specifies the path to the template to be rendered. - One of `source` or `data` must be specified, but not both. This source can - optionally be fetched using an [`artifact`][artifact] resource. This template - must exist on the machine prior to starting the task; it is not possible to - reference a template inside of a Docker container, for example. - -- `destination` `(string: )` - Specifies the location where the - resulting template should be rendered, relative to the task directory. +- `change_signal` `(string: "")` - Specifies the signal to send to the task as a + string like `"SIGUSR1"` or `"SIGINT"`. This option is required if the + `change_mode` is `signal`. - `data` `(string: "")` - Specifies the raw template to execute. One of `source` or `data` must be specified, but not both. This is useful for smaller templates, but we recommend using `source` for larger templates. +- `destination` `(string: )` - Specifies the location where the + resulting template should be rendered, relative to the task directory. + - `change_mode` `(string: "restart")` - Specifies the behavior Nomad should take if the rendered template changes. The possible values are: @@ -67,9 +65,11 @@ README][ct]. - `"restart"` - restart the task - `"signal"` - send a configurable signal to the task -- `change_signal` `(string: "")` - Specifies the signal to send to the task as a - string like `"SIGUSR1"` or `"SIGINT"`. This option is required if the - `change_mode` is `signal`. +- `source` `(string: "")` - Specifies the path to the template to be rendered. + One of `source` or `data` must be specified, but not both. This source can + optionally be fetched using an [`artifact`][artifact] resource. This template + must exist on the machine prior to starting the task; it is not possible to + reference a template inside of a Docker container, for example. - `splay` `(string: "5s")` - Specifies a random amount of time to wait between 0ms and the given splay value before invoking the change mode. 
This is From 82fb381fc5184d74607e69071e297da8ef458b6e Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 21:20:50 -0800 Subject: [PATCH 13/56] missing tests --- nomad/structs/diff.go | 4 ++-- nomad/structs/diff_test.go | 36 ++++++++++++++++++------------------ 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/nomad/structs/diff.go b/nomad/structs/diff.go index 82dc8ea89..f4ec05e69 100644 --- a/nomad/structs/diff.go +++ b/nomad/structs/diff.go @@ -667,11 +667,11 @@ func parameterizedJobDiff(old, new *ParameterizedJobConfig, contextual bool) *Ob diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) // Meta diffs - if optionalDiff := stringSetDiff(old.MetaOptional, new.MetaOptional, "OptionalMeta", contextual); optionalDiff != nil { + if optionalDiff := stringSetDiff(old.MetaOptional, new.MetaOptional, "MetaOptional", contextual); optionalDiff != nil { diff.Objects = append(diff.Objects, optionalDiff) } - if requiredDiff := stringSetDiff(old.MetaRequired, new.MetaRequired, "RequiredMeta", contextual); requiredDiff != nil { + if requiredDiff := stringSetDiff(old.MetaRequired, new.MetaRequired, "MetaRequired", contextual); requiredDiff != nil { diff.Objects = append(diff.Objects, requiredDiff) } diff --git a/nomad/structs/diff_test.go b/nomad/structs/diff_test.go index 0fa6e18dc..b5ca58a78 100644 --- a/nomad/structs/diff_test.go +++ b/nomad/structs/diff_test.go @@ -904,11 +904,11 @@ func TestJobDiff(t *testing.T) { Objects: []*ObjectDiff{ { Type: DiffTypeAdded, - Name: "OptionalMeta", + Name: "MetaOptional", Fields: []*FieldDiff{ { Type: DiffTypeAdded, - Name: "OptionalMeta", + Name: "MetaOptional", Old: "", New: "foo", }, @@ -916,11 +916,11 @@ func TestJobDiff(t *testing.T) { }, { Type: DiffTypeAdded, - Name: "RequiredMeta", + Name: "MetaRequired", Fields: []*FieldDiff{ { Type: DiffTypeAdded, - Name: "RequiredMeta", + Name: "MetaRequired", Old: "", New: "bar", }, @@ -958,11 +958,11 @@ func TestJobDiff(t *testing.T) { Objects: []*ObjectDiff{ { Type: DiffTypeDeleted, - Name: "OptionalMeta", + Name: "MetaOptional", Fields: []*FieldDiff{ { Type: DiffTypeDeleted, - Name: "OptionalMeta", + Name: "MetaOptional", Old: "foo", New: "", }, @@ -970,11 +970,11 @@ func TestJobDiff(t *testing.T) { }, { Type: DiffTypeDeleted, - Name: "RequiredMeta", + Name: "MetaRequired", Fields: []*FieldDiff{ { Type: DiffTypeDeleted, - Name: "RequiredMeta", + Name: "MetaRequired", Old: "bar", New: "", }, @@ -1018,17 +1018,17 @@ func TestJobDiff(t *testing.T) { Objects: []*ObjectDiff{ { Type: DiffTypeEdited, - Name: "OptionalMeta", + Name: "MetaOptional", Fields: []*FieldDiff{ { Type: DiffTypeAdded, - Name: "OptionalMeta", + Name: "MetaOptional", Old: "", New: "bam", }, { Type: DiffTypeDeleted, - Name: "OptionalMeta", + Name: "MetaOptional", Old: "foo", New: "", }, @@ -1036,17 +1036,17 @@ func TestJobDiff(t *testing.T) { }, { Type: DiffTypeEdited, - Name: "RequiredMeta", + Name: "MetaRequired", Fields: []*FieldDiff{ { Type: DiffTypeAdded, - Name: "RequiredMeta", + Name: "MetaRequired", Old: "", New: "bang", }, { Type: DiffTypeDeleted, - Name: "RequiredMeta", + Name: "MetaRequired", Old: "bar", New: "", }, @@ -1091,11 +1091,11 @@ func TestJobDiff(t *testing.T) { Objects: []*ObjectDiff{ { Type: DiffTypeNone, - Name: "OptionalMeta", + Name: "MetaOptional", Fields: []*FieldDiff{ { Type: DiffTypeNone, - Name: "OptionalMeta", + Name: "MetaOptional", Old: "foo", New: "foo", }, @@ -1103,11 +1103,11 @@ func TestJobDiff(t *testing.T) { }, { Type: DiffTypeNone, - Name: "RequiredMeta", + 
Name: "MetaRequired", Fields: []*FieldDiff{ { Type: DiffTypeNone, - Name: "RequiredMeta", + Name: "MetaRequired", Old: "bar", New: "bar", }, From 62e6596ea1d9bd7f4c4bf7df5e5303b48c12f2b3 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 21:35:53 -0800 Subject: [PATCH 14/56] API --- website/source/docs/http/job.html.md | 47 +++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/website/source/docs/http/job.html.md b/website/source/docs/http/job.html.md index 2e067f9ca..ca240d964 100644 --- a/website/source/docs/http/job.html.md +++ b/website/source/docs/http/job.html.md @@ -137,7 +137,6 @@ region is used; another region can be specified using the `?region=` query param "ModifyIndex": 14 } ``` - @@ -333,6 +332,52 @@ region is used; another region can be specified using the `?region=` query param +
+<dl>
+  <dt>Description</dt>
+  <dd>
+    Dispatch a new instance of a parameterized job.
+  </dd>
+
+  <dt>Method</dt>
+  <dd>PUT or POST</dd>
+
+  <dt>URL</dt>
+  <dd>`/v1/job/<ID>/dispatch`</dd>
+
+  <dt>Parameters</dt>
+  <dd>
+    <ul>
+      <li>
+        <span class="param">Payload</span>
+        <span class="param-flags">optional</span>
+        A `[]byte` array encoded as a base64 string.
+      </li>
+      <li>
+        <span class="param">Meta</span>
+        <span class="param-flags">optional</span>
+        A `map[string]string` of metadata keys to their values.
+      </li>
+    </ul>
+  </dd>
+
+  <dt>Returns</dt>
+  <dd>
+
+    ```javascript
+    {
+      "KnownLeader": false,
+      "LastContact": 0,
+      "Index": 13,
+      "JobCreateIndex": 12,
+      "EvalCreateIndex": 13,
+      "EvalID": "e5f55fac-bc69-119d-528a-1fc7ade5e02c",
+      "DispatchedJobID": "example/dispatch-1485408778-81644024"
+    }
+    ```
+
+  </dd>
+</dl>
+
  <dt>Description</dt>
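As a quick illustration of the dispatch endpoint added above, the following is a hypothetical request against a local agent. The job name `example`, the payload contents, and the metadata key are made up for this sketch; only the `/v1/job/<ID>/dispatch` path and the `Payload`/`Meta` fields come from the documentation itself.

```shell
# Base64-encode an illustrative payload (the contents are arbitrary).
$ PAYLOAD=$(echo -n '{"fruit":"apple"}' | base64)

# Dispatch a new instance of the parameterized job "example" with the
# payload and a single metadata key.
$ curl -X PUT "http://127.0.0.1:4646/v1/job/example/dispatch" \
    --data "{\"Payload\": \"${PAYLOAD}\", \"Meta\": {\"dispatcher_email\": \"ops@example.com\"}}"
```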
From 30abdb09cc4c768aab07491cf5e5002426259956 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 22:15:00 -0800 Subject: [PATCH 15/56] JSON job --- website/source/docs/http/json-jobs.html.md | 41 ++++++++++++++++++- .../dispatch-payload.html.md | 2 +- .../source/docs/job-specification/job.html.md | 10 ++++- .../job-specification/parameterized.html.md | 4 +- .../docs/job-specification/task.html.md | 4 ++ 5 files changed, 55 insertions(+), 6 deletions(-) diff --git a/website/source/docs/http/json-jobs.html.md b/website/source/docs/http/json-jobs.html.md index f86aa7559..ed279db5a 100644 --- a/website/source/docs/http/json-jobs.html.md +++ b/website/source/docs/http/json-jobs.html.md @@ -137,7 +137,10 @@ Below is an example of a JSON object that submits a `periodic` job to Nomad: }, "RelativeDest":"local/" } - ] + ], + "DispatchPayload": { + "File": "config.json" + } } ], "RestartPolicy":{ @@ -165,7 +168,17 @@ Below is an example of a JSON object that submits a `periodic` job to Nomad: "Meta":{ "foo":"bar", "baz":"pipe" - } + }, + "ParameterizedJob": { + "Payload": "required", + "MetaRequired": [ + "foo" + ], + "MetaOptional": [ + "bar" + ] + }, + "Payload": null } } ``` @@ -194,6 +207,24 @@ The `Job` object supports the following keys: * `Meta` - Annotates the job with opaque metadata. +* `ParameterizedJob` - Specifies the job as a paramterized job such that it can + be dispatched against. The `ParamaterizedJob` object supports the following + attributes: + + * `MetaOptional` - Specifies the set of metadata keys that may be provided + when dispatching against the job as a string array. + + * `MetaRequired` - Specifies the set of metadata keys that must be provided + when dispatching against the job as a string array. + + * `Payload` - Specifies the requirement of providing a payload when + dispatching against the parameterized job. The options for this field are + "optional", "required" and "forbidden" + +* `Payload` - The payload may not be set when submitting a job but may appear in + a dispatched job. The `Payload` will be a base64 encoded string containing the + payload that the job was dispatched with. + * `Priority` - Specifies the job priority which is used to prioritize scheduling and access to resources. Must be between 1 and 100 inclusively, and defaults to 50. @@ -295,6 +326,12 @@ The `Task` object supports the following keys: * `Constraints` - This is a list of `Constraint` objects. See the constraint reference for more details. +- `DispatchPayload` - Configures the task to have access to dispatch payloads. + The `DispatchPayload` object supports the following attributes: + + * `File` - Specifies the file name to write the content of dispatch payload + to. The file is written relative to the task's local directory. + * `Driver` - Specifies the task driver that should be used to run the task. See the [driver documentation](/docs/drivers/index.html) for what is available. Examples include `docker`, `qemu`, `java`, and `exec`. diff --git a/website/source/docs/job-specification/dispatch-payload.html.md b/website/source/docs/job-specification/dispatch-payload.html.md index 70fed94d6..357d0583b 100644 --- a/website/source/docs/job-specification/dispatch-payload.html.md +++ b/website/source/docs/job-specification/dispatch-payload.html.md @@ -39,7 +39,7 @@ job "docs" { ## `dispatch_payload` Parameters - `file` `(string: "")` - Specifies the file name to write the content of - dispatch payload to. The file is written to a [task's local + dispatch payload to. 
The file is written relative to the [task's local directory][localdir]. ## `dispatch_payload` Examples diff --git a/website/source/docs/job-specification/job.html.md b/website/source/docs/job-specification/job.html.md index 844dba133..9c95ad918 100644 --- a/website/source/docs/job-specification/job.html.md +++ b/website/source/docs/job-specification/job.html.md @@ -42,6 +42,10 @@ job "docs" { "my-key" = "my-value" } + parameterized { + # ... + } + periodic { # ... } @@ -74,13 +78,16 @@ job "docs" { - `datacenters` `(array: )` - A list of datacenters in the region which are eligible for task placement. This must be provided, and does not have a default. -- `group` ([Group][group]: ) - Specifies the start of a +- `group` ([Group][group]: \) - Specifies the start of a group of tasks. This can be provided multiple times to define additional groups. Group names must be unique within the job file. - `meta` ([Meta][]: nil) - Specifies a key-value map that annotates with user-defined metadata. +- `parameterized` ([Parameterized][parameterized]: nil) - Specifies + the job as a paramterized job such that it can be dispatched against. + - `periodic` ([Periodic][]: nil) - Allows the job to be scheduled at fixed times, dates or intervals. @@ -215,6 +222,7 @@ $ VAULT_TOKEN="..." nomad run example.nomad [constraint]: /docs/job-specification/constraint.html "Nomad constraint Job Specification" [group]: /docs/job-specification/group.html "Nomad group Job Specification" [meta]: /docs/job-specification/meta.html "Nomad meta Job Specification" +[parameterized]: /docs/job-specification/parameterized.html "Nomad parameterized Job Specification" [periodic]: /docs/job-specification/periodic.html "Nomad periodic Job Specification" [task]: /docs/job-specification/task.html "Nomad task Job Specification" [update]: /docs/job-specification/update.html "Nomad update Job Specification" diff --git a/website/source/docs/job-specification/parameterized.html.md b/website/source/docs/job-specification/parameterized.html.md index 59e0cfdb6..1dcb5cd6c 100644 --- a/website/source/docs/job-specification/parameterized.html.md +++ b/website/source/docs/job-specification/parameterized.html.md @@ -66,10 +66,10 @@ job "docs" { ## `parameterized` Parameters -- `meta_optional` `([]string: nil)` - Specifies the set of metadata keys that +- `meta_optional` `(array: nil)` - Specifies the set of metadata keys that may be provided when dispatching against the job. -- `meta_required` `([]string: nil)` - Specifies the set of metadata keys that +- `meta_required` `(array: nil)` - Specifies the set of metadata keys that must be provided when dispatching against the job. - `payload` `(string: "optional")` - Specifies the requirement of providing a diff --git a/website/source/docs/job-specification/task.html.md b/website/source/docs/job-specification/task.html.md index 38227ce5c..790c17be9 100644 --- a/website/source/docs/job-specification/task.html.md +++ b/website/source/docs/job-specification/task.html.md @@ -37,6 +37,9 @@ job "docs" { constraints on the task. This can be provided multiple times to define additional constraints. +- `dispatch_payload` ([DispatchPayload][]: nil) - Configures the + task to have access to dispatch payloads. + - `driver` - Specifies the task driver that should be used to run the task. See the [driver documentation](/docs/drivers/index.html) for what is available. Examples include `docker`, `qemu`, `java`, and `exec`. 
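Since the `dispatch_payload` parameter described above is easiest to understand in context, here is a minimal sketch of a task that consumes a dispatched payload. The job structure, driver, and file name are illustrative assumptions rather than content from the patch itself.

```hcl
task "process" {
  driver = "exec"

  # Hypothetical: write whatever payload the job was dispatched with into
  # the task's local/ directory so the command below can read it.
  dispatch_payload {
    file = "payload.json"
  }

  config {
    command = "/bin/cat"
    args    = ["local/payload.json"]
  }
}
```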
@@ -163,6 +166,7 @@ task "server" { [artifact]: /docs/job-specification/artifact.html "Nomad artifact Job Specification" [consul]: https://www.consul.io/ "Consul by HashiCorp" [constraint]: /docs/job-specification/constraint.html "Nomad constraint Job Specification" +[dispatchpayload]: /docs/job-specification/dispatch-payload.html "Nomad dispatch_payload Job Specification" [env]: /docs/job-specification/env.html "Nomad env Job Specification" [meta]: /docs/job-specification/meta.html "Nomad meta Job Specification" [resources]: /docs/job-specification/resources.html "Nomad resources Job Specification" From 7e6fcfc7928b9cfdfd36d93f20536e0f2066293e Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 22:16:20 -0800 Subject: [PATCH 16/56] move file --- .../{dispatch-payload.html.md => dispatch_payload.html.md} | 0 website/source/docs/job-specification/parameterized.html.md | 2 +- website/source/docs/job-specification/task.html.md | 2 +- website/source/layouts/docs.erb | 2 +- 4 files changed, 3 insertions(+), 3 deletions(-) rename website/source/docs/job-specification/{dispatch-payload.html.md => dispatch_payload.html.md} (100%) diff --git a/website/source/docs/job-specification/dispatch-payload.html.md b/website/source/docs/job-specification/dispatch_payload.html.md similarity index 100% rename from website/source/docs/job-specification/dispatch-payload.html.md rename to website/source/docs/job-specification/dispatch_payload.html.md diff --git a/website/source/docs/job-specification/parameterized.html.md b/website/source/docs/job-specification/parameterized.html.md index 1dcb5cd6c..2aff51470 100644 --- a/website/source/docs/job-specification/parameterized.html.md +++ b/website/source/docs/job-specification/parameterized.html.md @@ -157,4 +157,4 @@ job "email-blast" { [dispatch command]: /docs/commands/job-dispatch.html "Nomad Job Dispatch Command" [resources]: /docs/job-specification/resources.html "Nomad resources Job Specification" [interpolation]: /docs/runtime/interpolation.html "Nomad Runtime Interpolation" -[dispatch_payload]: /docs/job-specification/dispatch-payload.html "Nomad dispatch_payload Job Specification" +[dispatch_payload]: /docs/job-specification/dispatch_payload.html "Nomad dispatch_payload Job Specification" diff --git a/website/source/docs/job-specification/task.html.md b/website/source/docs/job-specification/task.html.md index 790c17be9..0bc47c42c 100644 --- a/website/source/docs/job-specification/task.html.md +++ b/website/source/docs/job-specification/task.html.md @@ -166,7 +166,7 @@ task "server" { [artifact]: /docs/job-specification/artifact.html "Nomad artifact Job Specification" [consul]: https://www.consul.io/ "Consul by HashiCorp" [constraint]: /docs/job-specification/constraint.html "Nomad constraint Job Specification" -[dispatchpayload]: /docs/job-specification/dispatch-payload.html "Nomad dispatch_payload Job Specification" +[dispatchpayload]: /docs/job-specification/dispatch_payload.html "Nomad dispatch_payload Job Specification" [env]: /docs/job-specification/env.html "Nomad env Job Specification" [meta]: /docs/job-specification/meta.html "Nomad meta Job Specification" [resources]: /docs/job-specification/resources.html "Nomad resources Job Specification" diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index f3b00a5fa..48fb8ebdd 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -49,7 +49,7 @@ constraint > - dispatch_payload + dispatch_payload > env From 
aa51fe6f2e78809309c9e619475962879cf8980a Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 22:21:51 -0800 Subject: [PATCH 17/56] Status examples --- .../source/docs/commands/status.html.md.erb | 115 +++++++++++++----- 1 file changed, 85 insertions(+), 30 deletions(-) diff --git a/website/source/docs/commands/status.html.md.erb b/website/source/docs/commands/status.html.md.erb index 0fb643ba0..5735b63ce 100644 --- a/website/source/docs/commands/status.html.md.erb +++ b/website/source/docs/commands/status.html.md.erb @@ -54,26 +54,28 @@ Short view of a specific job: ``` $ nomad status -short job1 -ID = job1 -Name = Test Job -Type = service -Priority = 3 -Datacenters = dc1,dc2,dc3 -Status = pending -Periodic = false +ID = job1 +Name = Test Job +Type = service +Priority = 3 +Datacenters = dc1,dc2,dc3 +Status = pending +Periodic = false +Parameterized = false ``` Full status information of a job: ``` $ nomad status example -ID = example -Name = example -Type = service -Priority = 50 -Datacenters = dc1 -Status = running -Periodic = false +ID = example +Name = example +Type = service +Priority = 50 +Datacenters = dc1 +Status = running +Periodic = false +Parameterized = false Summary Task Group Queued Starting Running Failed Complete Lost @@ -84,17 +86,71 @@ ID Eval ID Node ID Task Group Desired Status Created At 24cfd201 81efc2fa 8d0331e9 cache run running 08/08/16 21:03:19 CDT ``` +Full status information of a perodic job: + +``` +ID = example +Name = example +Type = batch +Priority = 50 +Datacenters = dc1 +Status = running +Periodic = true +Parameterized = false +Next Periodic Launch = 01/26/17 06:19:46 UTC (1s from now) + +Children Job Summary +Pending Running Dead +0 5 0 + +Previously Launched Jobs +ID Status +example/periodic-1485411581 running +example/periodic-1485411582 running +example/periodic-1485411583 running +example/periodic-1485411584 running +example/periodic-1485411585 running +``` + +Full status information of a parameterized job: + +``` +ID = example +Name = example +Type = batch +Priority = 50 +Datacenters = dc1 +Status = running +Periodic = false +Parameterized = true + +Parameterized Job +Payload = required +Required Metadata = foo +Optional Metadata = bar + +Parameterized Job Summary +Pending Running Dead +0 2 0 + +Dispatched Jobs +ID Status +example/dispatch-1485411496-58f24d2d running +example/dispatch-1485411499-fa2ee40e running +``` + Full status information of a job with placement failures: ``` $ nomad status example -ID = example -Name = example -Type = service -Priority = 50 -Datacenters = dc1 -Status = running -Periodic = false +ID = example +Name = example +Type = service +Priority = 50 +Datacenters = dc1 +Status = running +Periodic = false +Parameterized = false Summary Task Group Queued Starting Running Failed Complete Lost @@ -120,13 +176,14 @@ become availables so that it can place the remaining allocations. 
``` $ nomad status -evals example -ID = example -Name = example -Type = service -Priority = 50 -Datacenters = dc1 -Status = running -Periodic = false +ID = example +Name = example +Type = service +Priority = 50 +Datacenters = dc1 +Status = running +Periodic = false +Parameterized = false Summary Task Group Queued Starting Running Failed Complete Lost @@ -152,5 +209,3 @@ b82f58b6 8bf94335 8d0331e9 cache run running 08/08/16 21:03:19 CD ed3665f5 8bf94335 8d0331e9 cache run running 08/08/16 21:03:19 CDT 24cfd201 8bf94335 8d0331e9 cache run running 08/08/16 21:03:19 CDT ``` - -TODO: CRON AND DISPATCH From 36dad988965169e3737db7d8c443e7c8e28a4496 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 25 Jan 2017 22:27:15 -0800 Subject: [PATCH 18/56] Children api --- CHANGELOG.md | 5 +++-- website/source/docs/http/job.html.md | 5 +++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5a16113f9..2ff654764 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,10 @@ ## 0.5.3 (Unreleased) IMPROVEMENTS: - * api: Added APIs for requesting GC of allocations [GH-2192] - * core: Introduce Parameterized Jobs and Dispatch command/API [GH-2128] + * core: Introduce parameterized jobs and dispatch command/API [GH-2128] * core: Cancel blocked evals upon successful one for job [GH-2155] + * api: Added APIs for requesting GC of allocations [GH-2192] + * api: Job summary endpoint includes summary status for child jobs [GH-2128] * api/client: Plain text log streaming suitable for viewing logs in a browser [GH-2235] * cli: Defaulting to showing allocations which belong to currently registered diff --git a/website/source/docs/http/job.html.md b/website/source/docs/http/job.html.md index ca240d964..ee48655ab 100644 --- a/website/source/docs/http/job.html.md +++ b/website/source/docs/http/job.html.md @@ -274,6 +274,11 @@ region is used; another region can be specified using the `?region=` query param ```javascript { "JobID": "example", + "Children": { + "Dead": 0, + "Running": 7, + "Pending": 2 + }, "Summary": { "cache": { "Queued": 0, From 7e549679e97a151d2be8790dc5ff260a77e8d7a0 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Thu, 26 Jan 2017 11:31:47 -0800 Subject: [PATCH 19/56] Respond to comments --- command/job_dispatch.go | 9 +++++---- .../source/docs/commands/job-dispatch.html.md.erb | 13 ++++++++----- website/source/docs/http/job.html.md | 2 +- website/source/docs/http/json-jobs.html.md | 5 +++-- .../docs/job-specification/parameterized.html.md | 3 ++- 5 files changed, 19 insertions(+), 13 deletions(-) diff --git a/command/job_dispatch.go b/command/job_dispatch.go index 763260ec8..c6a334e98 100644 --- a/command/job_dispatch.go +++ b/command/job_dispatch.go @@ -33,10 +33,11 @@ General Options: Dispatch Options: -meta = - Meta takes a key/value pair seperated by "=". The metadata key will be - injected into the job's metadata. The flag can be provided more than - once to inject multiple metadata key/value pairs. Arbitrary injection is not - allowed. The parameterized job must allow the key to be injected. + Meta takes a key/value pair seperated by "=". The metadata key will be + merged into the job's metadata. The job may define a default value for the + key which is overriden when dispatching. The flag can be provided more than + once to inject multiple metadata key/value pairs. Arbitrary keys are not + allowed. The parameterized job must allow the key to be merged. -detach Return immediately instead of entering monitor mode. 
After job dispatch, diff --git a/website/source/docs/commands/job-dispatch.html.md.erb b/website/source/docs/commands/job-dispatch.html.md.erb index cce195491..cbd17f640 100644 --- a/website/source/docs/commands/job-dispatch.html.md.erb +++ b/website/source/docs/commands/job-dispatch.html.md.erb @@ -15,8 +15,8 @@ page.](https://releases.hashicorp.com/nomad/0.5.3-rc1/) The `job dispatch` command is used to create new instances of a [parameterized job]. The parameterized job captures a job's configuration and runtime requirements in a generic way and `dispatch` is used to provide the input for -the job to run against. One can think of the parameterized job as a function -definition and dispatch is used to invoke the function. +the job to run against. A parameterized job is similar to a function definition, +and dispatch is used to invoke the function. Each time a job is dispatched, a unique job ID is generated. This allows a caller to track the status of the job, much like a future or promise in some @@ -33,6 +33,8 @@ dispatched instance can be provided via stdin by using "-" for the input source or by specifiying a path to a file. Metadata can be supplied by using the meta flag one or more times. +The payload has a **size limit of 16KiB**. + Upon successfully creation, the dispatched job ID will be printed and the triggered evaluation will be monitored. This can be disabled by supplying the detach flag. @@ -49,9 +51,10 @@ client connection issues or internal errors, are indicated by exit code 1. ## Run Options * `-meta`: Meta takes a key/value pair seperated by "=". The metadata key will - be injected into the job's metadata. The flag can be provided more than once - to inject multiple metadata key/value pairs. Arbitrary injection is not - allowed. The parameterized job must allow the key to be injected. + be merged into the job's metadata. The job may define a default value for the + key which is overriden when dispatching. The flag can be provided more than + once to inject multiple metadata key/value pairs. Arbitrary keys are not + allowed. The parameterized job must allow the key to be merged. * `-detach`: Return immediately instead of monitoring. A new evaluation ID will be output, which can be used to examine the evaluation using the diff --git a/website/source/docs/http/job.html.md b/website/source/docs/http/job.html.md index ee48655ab..dfdc01c65 100644 --- a/website/source/docs/http/job.html.md +++ b/website/source/docs/http/job.html.md @@ -355,7 +355,7 @@ region is used; another region can be specified using the `?region=` query param
  • Payload optional - A `[]byte` array encoded as a base64 string. + A `[]byte` array encoded as a base64 string with a maximum size of 16KiB.
  • Meta diff --git a/website/source/docs/http/json-jobs.html.md b/website/source/docs/http/json-jobs.html.md index ed279db5a..7d8cc3bbe 100644 --- a/website/source/docs/http/json-jobs.html.md +++ b/website/source/docs/http/json-jobs.html.md @@ -219,11 +219,12 @@ The `Job` object supports the following keys: * `Payload` - Specifies the requirement of providing a payload when dispatching against the parameterized job. The options for this field are - "optional", "required" and "forbidden" + "optional", "required" and "forbidden". The default value is "optional". * `Payload` - The payload may not be set when submitting a job but may appear in a dispatched job. The `Payload` will be a base64 encoded string containing the - payload that the job was dispatched with. + payload that the job was dispatched with. The `payload` has a **maximum size + of 16 KiB**. * `Priority` - Specifies the job priority which is used to prioritize scheduling and access to resources. Must be between 1 and 100 inclusively, diff --git a/website/source/docs/job-specification/parameterized.html.md b/website/source/docs/job-specification/parameterized.html.md index 2aff51470..6d0cb126c 100644 --- a/website/source/docs/job-specification/parameterized.html.md +++ b/website/source/docs/job-specification/parameterized.html.md @@ -73,7 +73,8 @@ job "docs" { must be provided when dispatching against the job. - `payload` `(string: "optional")` - Specifies the requirement of providing a - payload when dispatching against the parameterized job. The options for this + payload when dispatching against the parameterized job. The **maximum size of a + `payload` is 16 KiB**. The options for this field are: - `"optional"` - A payload is optional when dispatching against the job. From 9ef3b6a2d0fe1ff69f88e03c1561da4279024f61 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Thu, 26 Jan 2017 11:57:32 -0800 Subject: [PATCH 20/56] Disallow GC of parameterized jobs This PR makes it so parameterized jobs do not get garbage collected and adds a test. --- nomad/core_sched.go | 2 ++ nomad/core_sched_test.go | 60 ++++++++++++++++++++++++++++++++++++++++ nomad/state/schema.go | 5 ++-- 3 files changed, 65 insertions(+), 2 deletions(-) diff --git a/nomad/core_sched.go b/nomad/core_sched.go index 76eac8917..8cb170045 100644 --- a/nomad/core_sched.go +++ b/nomad/core_sched.go @@ -192,6 +192,8 @@ func (c *CoreScheduler) evalGC(eval *structs.Evaluation) error { // The Evaluation GC should not handle batch jobs since those need to be // garbage collected in one shot + // XXX believe there is a bug that if a batch job gets stopped, there is no + // way for it to GC the eval/allocs gc, allocs, err := c.gcEval(eval, oldThreshold, false) if err != nil { return err diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index f6c75753d..72bd4bf66 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -965,6 +965,66 @@ func TestCoreScheduler_JobGC_Force(t *testing.T) { } } +// This test ensures parameterized and periodic jobs don't get GCd +func TestCoreScheduler_JobGC_NonGCable(t *testing.T) { + s1 := testServer(t, nil) + defer s1.Shutdown() + testutil.WaitForLeader(t, s1.RPC) + + // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 + s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) + + // Insert a parameterized job. 
+ state := s1.fsm.State() + job := mock.Job() + job.Type = structs.JobTypeBatch + job.Status = structs.JobStatusRunning + job.ParameterizedJob = &structs.ParameterizedJobConfig{ + Payload: structs.DispatchPayloadRequired, + } + err := state.UpsertJob(1000, job) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Insert a periodic job. + job2 := mock.PeriodicJob() + if err := state.UpsertJob(1001, job2); err != nil { + t.Fatalf("err: %v", err) + } + + // Create a core scheduler + snap, err := state.Snapshot() + if err != nil { + t.Fatalf("err: %v", err) + } + core := NewCoreScheduler(s1, snap) + + // Attempt the GC + gc := s1.coreJobEval(structs.CoreJobForceGC, 1002) + err = core.Process(gc) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should still exist + out, err := state.JobByID(job.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("bad: %v", out) + } + + outE, err := state.JobByID(job2.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + if outE == nil { + t.Fatalf("bad: %v", outE) + } +} + func TestCoreScheduler_PartitionReap(t *testing.T) { s1 := testServer(t, nil) defer s1.Shutdown() diff --git a/nomad/state/schema.go b/nomad/state/schema.go index aed0e7ab3..f61b1d109 100644 --- a/nomad/state/schema.go +++ b/nomad/state/schema.go @@ -149,9 +149,10 @@ func jobIsGCable(obj interface{}) (bool, error) { return false, fmt.Errorf("Unexpected type: %v", obj) } - // The job is GCable if it is batch and it is not periodic + // The job is GCable if it is batch, it is not periodic and is not a + // parameterized job. periodic := j.Periodic != nil && j.Periodic.Enabled - gcable := j.Type == structs.JobTypeBatch && !periodic + gcable := j.Type == structs.JobTypeBatch && !periodic && !j.IsParameterized() return gcable, nil } From 87c9859db65e0eec22e0730e0c9eb0b91cc5afbb Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Thu, 26 Jan 2017 13:02:13 -0800 Subject: [PATCH 21/56] Fix flaky test --- nomad/vault_test.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/nomad/vault_test.go b/nomad/vault_test.go index e17c3bce0..56e609b99 100644 --- a/nomad/vault_test.go +++ b/nomad/vault_test.go @@ -591,9 +591,6 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) { unblock := make(chan struct{}) for i := 0; i < numRequests; i++ { go func() { - // Ensure all the goroutines are made - time.Sleep(10 * time.Millisecond) - // Lookup ourselves _, err := client.LookupToken(ctx, v.Config.Token) if err != nil { @@ -607,7 +604,7 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) { // Cancel the context cancel() - time.AfterFunc(1*time.Second, func() { close(unblock) }) + close(unblock) }() } @@ -618,9 +615,15 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) { } desired := numRequests - 1 - if cancels != desired { - t.Fatalf("Incorrect number of cancels; got %d; want %d", cancels, desired) - } + testutil.WaitForResult(func() (bool, error) { + if cancels != desired { + return false, fmt.Errorf("Incorrect number of cancels; got %d; want %d", cancels, desired) + } + + return true, nil + }, func(err error) { + t.Fatalf("Connection not established") + }) } func TestVaultClient_CreateToken_Root(t *testing.T) { From 6c4a89989e23efe76b9fcc51ea1a65d18ee57983 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Thu, 26 Jan 2017 13:07:50 -0800 Subject: [PATCH 22/56] Fix typo --- command/job_dispatch.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/job_dispatch.go b/command/job_dispatch.go index 
c6a334e98..702fa4a97 100644 --- a/command/job_dispatch.go +++ b/command/job_dispatch.go @@ -51,7 +51,7 @@ Dispatch Options: } func (c *JobDispatchCommand) Synopsis() string { - return "Dispatch an instance of a parametereized job" + return "Dispatch an instance of a parameterized job" } func (c *JobDispatchCommand) Run(args []string) int { From 3c7e018438414a55d085488f4e966f90e4d96634 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Thu, 26 Jan 2017 13:25:37 -0800 Subject: [PATCH 23/56] up timing --- client/task_runner_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/task_runner_test.go b/client/task_runner_test.go index e7e2b19db..e2cb99209 100644 --- a/client/task_runner_test.go +++ b/client/task_runner_test.go @@ -536,7 +536,7 @@ func TestTaskRunner_RestartTask(t *testing.T) { task.Driver = "mock_driver" task.Config = map[string]interface{}{ "exit_code": "0", - "run_for": "10s", + "run_for": "100s", } ctx := testTaskRunnerFromAlloc(t, true, alloc) From 9d4e00d7389ec79fd705959ad8b9caa21de5526d Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Thu, 26 Jan 2017 13:25:55 -0800 Subject: [PATCH 24/56] BC warning --- website/source/docs/agent/configuration/vault.html.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/source/docs/agent/configuration/vault.html.md b/website/source/docs/agent/configuration/vault.html.md index a491d6348..9d1e012b2 100644 --- a/website/source/docs/agent/configuration/vault.html.md +++ b/website/source/docs/agent/configuration/vault.html.md @@ -51,7 +51,9 @@ vault { The token given to Nomad does not have to be created from this role but must have "update" capability on "auth/token/create/" path in Vault. If this value is unset and the token is created from a role, the value - is defaulted to the role the token is from. + is defaulted to the role the token is from. This is largely for backwards + compatibility. It is recommended to set the `create_from_role` field if Nomad + is deriving child tokens from a role. - `task_token_ttl` `(string: "")` - Specifies the TTL of created tokens when using a root token. This is specified using a label suffix like "30s" or "1h". 
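To ground the `create_from_role` guidance above, here is a minimal sketch of the server-side `vault` stanza. The address, token, and role name are placeholders and assume a suitable token role already exists in Vault.

```hcl
vault {
  enabled = true
  address = "https://vault.service.consul:8200"

  # Placeholder periodic token for the Nomad servers; in practice this is
  # often supplied via the VAULT_TOKEN environment variable instead.
  token = "REPLACE_WITH_PERIODIC_TOKEN"

  # Derive task tokens from an assumed role so the role, not the server
  # token, controls which policies tasks may request.
  create_from_role = "nomad-cluster"
}
```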
From 3f870cab49f0a7c14dee32c79219c965f2007837 Mon Sep 17 00:00:00 2001 From: taylorchu Date: Thu, 26 Jan 2017 14:03:32 -0800 Subject: [PATCH 25/56] TWEAK: remove else block in tls handling --- nomad/structs/config/consul.go | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/nomad/structs/config/consul.go b/nomad/structs/config/consul.go index 468b8a084..19ee82c71 100644 --- a/nomad/structs/config/consul.go +++ b/nomad/structs/config/consul.go @@ -1,7 +1,6 @@ package config import ( - "crypto/tls" "fmt" "net/http" "strings" @@ -181,20 +180,12 @@ func (c *ConsulConfig) ApiConfig() (*consul.Config, error) { tlsConfig.InsecureSkipVerify = !*c.VerifySSL } - if tlsConfig.InsecureSkipVerify { - config.HttpClient.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - } - } else { - tlsClientCfg, err := consul.SetupTLSConfig(&tlsConfig) - if err != nil { - return nil, fmt.Errorf("error creating tls client config for consul: %v", err) - } - config.HttpClient.Transport = &http.Transport{ - TLSClientConfig: tlsClientCfg, - } + tlsClientCfg, err := consul.SetupTLSConfig(&tlsConfig) + if err != nil { + return nil, fmt.Errorf("error creating tls client config for consul: %v", err) + } + config.HttpClient.Transport = &http.Transport{ + TLSClientConfig: tlsClientCfg, } } From 581418653263d55928769cf4b65df45f7612317d Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Thu, 26 Jan 2017 15:06:14 -0800 Subject: [PATCH 26/56] Loosen timing a bit on travis --- nomad/heartbeat_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nomad/heartbeat_test.go b/nomad/heartbeat_test.go index f14bfd8b1..7ab5495a8 100644 --- a/nomad/heartbeat_test.go +++ b/nomad/heartbeat_test.go @@ -70,7 +70,7 @@ func TestResetHeartbeatTimerLocked(t *testing.T) { t.Fatalf("missing timer") } - time.Sleep(10 * time.Millisecond) + time.Sleep(time.Duration(testutil.TestMultiplier()*10) * time.Millisecond) if _, ok := s1.heartbeatTimers["foo"]; ok { t.Fatalf("timer should be gone") @@ -99,7 +99,7 @@ func TestResetHeartbeatTimerLocked_Renew(t *testing.T) { renew := time.Now() // Watch for invalidation - for time.Now().Sub(renew) < 20*time.Millisecond { + for time.Now().Sub(renew) < time.Duration(testutil.TestMultiplier()*20)*time.Millisecond { s1.heartbeatTimersLock.Lock() _, ok := s1.heartbeatTimers["foo"] s1.heartbeatTimersLock.Unlock() From d914c1b9e1be0cade1ffca65958679deeb83549a Mon Sep 17 00:00:00 2001 From: James Rasell Date: Fri, 27 Jan 2017 11:55:31 +0000 Subject: [PATCH 27/56] Update the 'requirements.html.md' cluster docs with 'Ports Used'. This adds a section detailing the ports and protocols used by Nomad in a similar way to the Consul docs which can be seen 'https://www.consul.io/docs/agent/options.html#ports'. --- website/source/docs/cluster/requirements.html.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/website/source/docs/cluster/requirements.html.md b/website/source/docs/cluster/requirements.html.md index bb8943ab9..7c48089a4 100644 --- a/website/source/docs/cluster/requirements.html.md +++ b/website/source/docs/cluster/requirements.html.md @@ -54,3 +54,18 @@ are not participating in Raft. Thus clients can have 100+ millisecond latency to their servers. This allows having a set of Nomad servers that service clients that can be spread geographically over a continent or even the world in the case of having a single "global" region and many datacenter. 
+ +## Ports Used + +Nomad requires up to 3 different ports to work properly, some on +TCP, UDP, or both protocols. Below we document the requirements for each +port. + +* HTTP API (Default 4646). This is used by clients to talk to the HTTP + API. TCP only. + +* Server RPC (Default 4647). This is used by servers to handle incoming + requests from other agents. TCP only. + +* Serf WAN (Default 4648). This is used by servers to gossip over the + WAN to other servers. TCP and UDP. From a126dcf58277d6078e6490572f5459396b7a0371 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Fri, 27 Jan 2017 12:24:59 -0800 Subject: [PATCH 28/56] new vault docs --- ...rver-role.json => nomad-cluster-role.json} | 4 +- .../source/data/vault/nomad-server-policy.hcl | 39 ++-- .../docs/agent/configuration/vault.html.md | 5 + .../docs/vault-integration/index.html.md | 183 ++++++++++++------ 4 files changed, 161 insertions(+), 70 deletions(-) rename website/source/data/vault/{nomad-server-role.json => nomad-cluster-role.json} (56%) diff --git a/website/source/data/vault/nomad-server-role.json b/website/source/data/vault/nomad-cluster-role.json similarity index 56% rename from website/source/data/vault/nomad-server-role.json rename to website/source/data/vault/nomad-cluster-role.json index 32284d389..37728363f 100644 --- a/website/source/data/vault/nomad-server-role.json +++ b/website/source/data/vault/nomad-cluster-role.json @@ -1,7 +1,7 @@ { - "allowed_policies": "nomad-server", + "disallowed_policies": "nomad-server", "explicit_max_ttl": 0, - "name": "nomad-server", + "name": "nomad-cluster", "orphan": false, "period": 259200, "renewable": true diff --git a/website/source/data/vault/nomad-server-policy.hcl b/website/source/data/vault/nomad-server-policy.hcl index d1190af7e..bd91b852d 100644 --- a/website/source/data/vault/nomad-server-policy.hcl +++ b/website/source/data/vault/nomad-server-policy.hcl @@ -1,20 +1,35 @@ -# Allow creating tokens under the role -path "auth/token/create/nomad-server" { +# Allow creating tokens under "nomad-cluster" role. The role name should be +# updated if "nomad-cluster" is not used. +path "auth/token/create/nomad-cluster" { capabilities = ["update"] } -# Allow looking up the role -path "auth/token/roles/nomad-server" { +# Allow looking up "nomad-cluster" role. The role name should be updated if +# "nomad-cluster" is not used. +path "auth/token/roles/nomad-cluster" { capabilities = ["read"] } -# Allow looking up incoming tokens to validate they have permissions to -# access the tokens they are requesting -path "auth/token/lookup/*" { - capabilities = ["read"] -} - -# Allow revoking tokens that should no longer exist -path "/auth/token/revoke-accessor/*" { +# Allow looking up incoming tokens to validate they have permissions to access +# the tokens they are requesting. This is only required if +# `allow_unauthenticated` is set to false. +path "auth/token/lookup" { + capabilities = ["update"] +} + +# Allow revoking tokens that should no longer exist. This allows revoking +# tokens for dead tasks. +path "auth/token/revoke-accessor" { + capabilities = ["update"] +} + +# Allow checking the capabilities of our own token. This is used to validate the +# token upon startup. +path "/sys/capabilities-self" { + capabilities = ["update"] +} + +# Allow our own token to be renewed. 
+path "auth/token/renew-self" { capabilities = ["update"] } diff --git a/website/source/docs/agent/configuration/vault.html.md b/website/source/docs/agent/configuration/vault.html.md index 9d1e012b2..e9ded6d79 100644 --- a/website/source/docs/agent/configuration/vault.html.md +++ b/website/source/docs/agent/configuration/vault.html.md @@ -119,6 +119,11 @@ vault { # should set the VAULT_TOKEN environment variable when starting the Nomad # agent token = "debecfdc-9ed7-ea22-c6ee-948f22cdd474" + + # Setting the create_from_role option causes Nomad to create tokens for tasks + # via the provided role. This allows the role to manage what policies are + # allowed and disallowed for use by tasks. + create_from_role = "nomad-server" } ``` diff --git a/website/source/docs/vault-integration/index.html.md b/website/source/docs/vault-integration/index.html.md index 7baff34b0..a57771090 100644 --- a/website/source/docs/vault-integration/index.html.md +++ b/website/source/docs/vault-integration/index.html.md @@ -13,7 +13,7 @@ Many workloads require access to tokens, passwords, certificates, API keys, and other secrets. To enable secure, auditable and easy access to your secrets, Nomad integrates with HashiCorp's [Vault][]. Nomad servers and clients coordinate with Vault to derive a Vault token that has access to only the Vault -policies the tasks needs. Nomad clients make the token avaliable to the task and +policies the tasks needs. Nomad clients make the token available to the task and handle the tokens renewal. Further, Nomad's [`template` block][template] can retrieve secrets from Vault making it easier than ever to secure your infrastructure. @@ -24,76 +24,144 @@ install Vault separately from Nomad. Nomad does not run Vault for you. ## Vault Configuration To use the Vault integration, Nomad servers must be provided a Vault token. This -token can either be a root token or a token from a role. The root token is the -easiest way to get started, but we recommend a role-based token for production -installations. Nomad servers will renew the token automatically. +token can either be a root token or a token with permissions to create from a +role. The root token is the easiest way to get started, but we recommend a +role-based token for production installations. Nomad servers will renew the +token automatically. -### Root Token +### Root Token Integration If Nomad is given a [root token](https://www.vaultproject.io/docs/concepts/tokens.html#root-tokens), no further configuration is needed as Nomad can derive a token for jobs using any Vault policies. -### Role based Token +### Role based Integration Vault's [Token Authentication Backend][auth] supports a concept called "roles". Roles allow policies to be grouped together and token creation to be delegated to a trusted service such as Nomad. By creating a role, the set of policies that tasks managed by Nomad can access may be limited compared to giving Nomad a root -token. +token. Roles allow both whitelist and blacklist management of polcies accessible +to the role. -When given a non-root token, Nomad queries the token to determine the role it -was generated from. It will then derive tokens for jobs based on that role. -Nomad expects the role to be created with several properties described below -when creating the role with the Vault endpoint `/auth/token/roles/`: +To configure Nomad and Vault to create tokens against a role, the following must +occur: + + 1. 
Create a set of Vault policies that can be used to generate a token for the + Nomad Servers that allow them to create from a role and manage created + tokens within the cluster. The required policies are described below. + + 2. Create a Vault role with the configuration described below. + + 3. Configure Nomad to use the created role. + + 4. Give Nomad servers a token with the policies created from step 1. The token + must also be periodic. + +#### Required Vault Policies + +The token Nomad receives must have the capabilities listed below. An explanation +for the use of each capability is given. + +``` +# Allow creating tokens under "nomad-cluster" role. The role name should be +# updated if "nomad-cluster" is not used. +path "auth/token/create/nomad-cluster" { + capabilities = ["update"] +} + +# Allow looking up "nomad-cluster" role. The role name should be updated if +# "nomad-cluster" is not used. +path "auth/token/roles/nomad-cluster" { + capabilities = ["read"] +} + +# Allow looking up incoming tokens to validate they have permissions to access +# the tokens they are requesting. This is only required if +# `allow_unauthenticated` is set to false. +path "auth/token/lookup" { + capabilities = ["update"] +} + +# Allow revoking tokens that should no longer exist. This allows revoking +# tokens for dead tasks. +path "auth/token/revoke-accessor" { + capabilities = ["update"] +} + +# Allow checking the capabilities of our own token. This is used to validate the +# token upon startup. +path "/sys/capabilities-self" { + capabilities = ["update"] +} + +# Allow our own token to be renewed. +path "auth/token/renew-self" { + capabilities = ["update"] +} +``` + +The above [`nomad-server` policy](/data/vault/nomad-server-policy.hcl) is +available for download. Below is an example of writing this policy to Vault: + +``` +# Download the policy +$ curl https://nomadproject.io/data/vault/nomad-server-policy.hcl -O -s -L + +# Write the policy to Vault +$ vault policy-write nomad-server nomad-server-policy.hcl +``` + +#### Vault Role Configuration + +A Vault role must be created for use by Nomad. The role can be used to manage +what Vault policies are accessible by jobs submitted to Nomad. The policies can +be managed as a whitelist by using `allowed_policies` in the role definition or +as a blacklist by using `disallowed_policies`. + +If using `allowed_policies`, task's may only request Vault policies that are in +the list. If `disallowed_policies` is used, task may request any policy that is +not in the `disallowed_policies` list. There are tradeoffs to both approaches +but generally it is easier to use the blacklist approach and add policies that +you would not like tasks to have access to into the `disallowed_policies` list. + +An example role definition is given below: ```json { - "allowed_policies": "", + "disallowed_policies": "nomad-server", "explicit_max_ttl": 0, - "name": "nomad", + "name": "nomad-cluster", "orphan": false, "period": 259200, "renewable": true } ``` -#### Parameters: +##### Role Requirements + +Nomad checks that role's have an appropriate configuration for use by the +cluster. Fields that are checked are documented below as well as descriptions of +the important fields. See Vault's [Token Authentication Backend][auth] +documentation for all possible fields and more complete documentation. * `allowed_policies` - Specifies the list of allowed policies as a - comma-seperated string This list should contain all policies that jobs running - under Nomad should have access to. 
Further, the list must contain one or more - policies that gives Nomad the following permissions: + comma-seperated string. This list should contain all policies that jobs running + under Nomad should have access to. - ```hcl - # Allow creating tokens under the role - path "auth/token/create/nomad-server" { - capabilities = ["update"] - } - - # Allow looking up the role - path "auth/token/roles/nomad-server" { - capabilities = ["read"] - } - - # Allow looking up incoming tokens to validate they have permissions to - # access the tokens they are requesting - path "auth/token/lookup/*" { - capabilities = ["read"] - } - - # Allow revoking tokens that should no longer exist - path "/auth/token/revoke-accessor/*" { - capabilities = ["update"] - } - ``` +* `disallowed_policies` - Specifies the list of disallowed policies as a + comma-seperated string. This list should contain all policies that jobs running + under Nomad should **not** have access to. The policy created above that + grants Nomad the ability to generate tokens from the role should be included + in list of disallowed policies. This prevents tokens created by Nomad from + generating new tokens with different policies than those granted by Nomad. * `explicit_max_ttl` - Specifies the max TTL of a token. Must be set to `0` to allow periodic tokens. * `name` - Specifies the name of the policy. We recommend using the name - `nomad-server`. If a different name is chosen, replace the role in the above + `nomad-cluster`. If a different name is chosen, replace the role in the above policy. * `orphan` - Specifies whether tokens created againsts this role will be @@ -108,14 +176,23 @@ when creating the role with the Vault endpoint `/auth/token/roles/`: * `renewable` - Specifies whether created tokens are renewable. Must be set to `true`. This allows Nomad to renew tokens for tasks. -See Vault's [Token Authentication Backend][auth] documentation for all possible -fields and more complete documentation. +The above [`nomad-cluster` role](/data/vault/nomad-cluster-role.hcl) is +available for download. Below is an example of writing this role to Vault: + +``` +# Download the role +$ curl https://nomadproject.io/data/vault/nomad-cluster-role.json -O -s -L + +# Create the role with Vault +$ vault write /auth/token/roles/nomad-cluster @nomad-cluster-role.json +``` + #### Example Configuration To make getting started easy, the basic [`nomad-server` policy](/data/vault/nomad-server-policy.hcl) and -[role](/data/vault/nomad-server-role.json) described above are available for +[role](/data/vault/nomad-cluster-role.json) described above are available for download. The below example assumes Vault is accessible, unsealed and the the operator has @@ -124,17 +201,13 @@ appropriate permissions. ```shell # Download the policy and role $ curl https://nomadproject.io/data/vault/nomad-server-policy.hcl -O -s -L -$ curl https://nomadproject.io/data/vault/nomad-server-role.json -O -s -L +$ curl https://nomadproject.io/data/vault/nomad-cluster-role.json -O -s -L # Write the policy to Vault $ vault policy-write nomad-server nomad-server-policy.hcl -# Edit the role to add any policies that you would like to be accessible to -# Nomad jobs in the list of allowed_policies. Do not remove `nomad-server`. 
-$ editor nomad-server-role.json - # Create the role with Vault -$ vault write /auth/token/roles/nomad-server @nomad-server-role.json +$ vault write /auth/token/roles/nomad-cluster @nomad-cluster-role.json ``` #### Retrieving the Role based Token @@ -143,23 +216,19 @@ After the role is created, a token suitable for the Nomad servers may be retrieved by issuing the following Vault command: ``` -$ vault token-create -role nomad-server +$ vault token-create -policy nomad-server -period 72h Key Value --- ----- token f02f01c2-c0d1-7cb7-6b88-8a14fada58c0 token_accessor 8cb7fcb3-9a4f-6fbf-0efc-83092bb0cb1c token_duration 259200s token_renewable true -token_policies [] +token_policies [default nomad-server] ``` The token can then be set in the server configuration's [vault block][config], as a command-line flag, or via an environment variable. -``` -$ nomad agent -config /path/to/config -vault-token=f02f01c2-c0d1-7cb7-6b88-8a14fada58c0 -``` - ``` $ VAULT_TOKEN=f02f01c2-c0d1-7cb7-6b88-8a14fada58c0 nomad agent -config /path/to/config ``` @@ -185,10 +254,12 @@ the reasons the token is invalid and disable Vault integration. - Vault 0.6.2 or later is needed. +# XXX - Nomad is given either a root token or a token created from an approriate role. [auth]: https://www.vaultproject.io/docs/auth/token.html "Vault Authentication Backend" -[config]: /docs/agent/configuration/vault.html "Nomad Vault configuration block" +[config]: /docs/agent/configuration/vault.html "Nomad Vault Configuration Block" +[createfromrole]: /docs/agent/configuration/vault.html#create_from_role "Nomad vault create_from_role Configuration Flag" [template]: /docs/job-specification/template.html "Nomad template Job Specification" [vault]: https://www.vaultproject.io/ "Vault by HashiCorp" [vault-spec]: /docs/job-specification/vault.html "Nomad Vault Job Specification" From 4305339f55565303cf4ba762608f71ae1dedbd78 Mon Sep 17 00:00:00 2001 From: Ryon Coleman Date: Fri, 27 Jan 2017 15:41:19 -0500 Subject: [PATCH 29/56] Add Google Tag Manager #GTM-NR2SD7C --- website/source/layouts/_meta.erb | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/website/source/layouts/_meta.erb b/website/source/layouts/_meta.erb index a09facc3f..bcb3fc783 100644 --- a/website/source/layouts/_meta.erb +++ b/website/source/layouts/_meta.erb @@ -15,7 +15,19 @@ <%= javascript_include_tag "ie-compat" %> + + + + <%= yield_content :head %> + + + From 9b11f269e3c0ec0979a7d12dc4c5d1ecacdd10bc Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Fri, 27 Jan 2017 14:11:34 -0800 Subject: [PATCH 30/56] Add command line flag for create-from-role --- command/agent/command.go | 4 ++++ website/source/docs/commands/agent.html.md.erb | 1 + 2 files changed, 5 insertions(+) diff --git a/command/agent/command.go b/command/agent/command.go index c40f8482d..0e7b368fa 100644 --- a/command/agent/command.go +++ b/command/agent/command.go @@ -120,6 +120,7 @@ func (c *Command) readConfig() *Config { }), "vault-allow-unauthenticated", "") flags.StringVar(&cmdConfig.Vault.Token, "vault-token", "", "") flags.StringVar(&cmdConfig.Vault.Addr, "vault-address", "", "") + flags.StringVar(&cmdConfig.Vault.Role, "vault-create-from-role", "", "") flags.StringVar(&cmdConfig.Vault.TLSCaFile, "vault-ca-file", "", "") flags.StringVar(&cmdConfig.Vault.TLSCaPath, "vault-ca-path", "", "") flags.StringVar(&cmdConfig.Vault.TLSCertFile, "vault-cert-file", "", "") @@ -934,6 +935,9 @@ Vault Options: This only needs to be set on Servers. 
Overrides the Vault token read from the VAULT_TOKEN environment variable. + -vault-create-from-role= + The role name to create tokens for tasks from. + -vault-allow-unauthenticated Whether to allow jobs to be sumbitted that request Vault Tokens but do not authentication. The flag only applies to Servers. diff --git a/website/source/docs/commands/agent.html.md.erb b/website/source/docs/commands/agent.html.md.erb index 88b46fea9..468d6ae85 100644 --- a/website/source/docs/commands/agent.html.md.erb +++ b/website/source/docs/commands/agent.html.md.erb @@ -65,6 +65,7 @@ via CLI arguments. The `agent` command accepts the following arguments: * `-vault-token=`: The Vault token used to derive tokens. Only needs to be set on Servers. Overrides the Vault token read from the VAULT_TOKEN environment variable. +* `-vault-create-from-role=`: The role name to create tokens for tasks from. * `-vault-ca-file=`: Path to a PEM-encoded CA cert file used to verify the Vault server SSL certificate. * `-vault-ca-path=`: Path to a directory of PEM-encoded CA cert files used From 8099478f2f312499bd10d5478ef65def3485f68d Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Fri, 27 Jan 2017 15:06:01 -0800 Subject: [PATCH 31/56] Cleanup and skip test --- nomad/vault_test.go | 10 ++ testutil/vault.go | 8 ++ .../docs/vault-integration/index.html.md | 123 +++++++++--------- 3 files changed, 80 insertions(+), 61 deletions(-) diff --git a/nomad/vault_test.go b/nomad/vault_test.go index e17c3bce0..53063e635 100644 --- a/nomad/vault_test.go +++ b/nomad/vault_test.go @@ -764,6 +764,16 @@ func TestVaultClient_CreateToken_Root_Target_Role(t *testing.T) { } func TestVaultClient_CreateToken_Blacklist_Role(t *testing.T) { + // Need to skip if test is 0.6.4 + version, err := testutil.VaultVersion() + if err != nil { + t.Fatalf("failed to determine version: %v", err) + } + + if strings.Contains(version, "v0.6.4") { + t.SkipNow() + } + v := testutil.NewTestVault(t).Start() defer v.Stop() diff --git a/testutil/vault.go b/testutil/vault.go index 23241194d..4ae2bcee1 100644 --- a/testutil/vault.go +++ b/testutil/vault.go @@ -123,3 +123,11 @@ func getPort() uint64 { vaultPortOffset += 1 return p } + +// VaultVersion returns the Vault version as a string or an error if it couldn't +// be determined +func VaultVersion() (string, error) { + cmd := exec.Command("vault", "version") + out, err := cmd.Output() + return string(out), err +} diff --git a/website/source/docs/vault-integration/index.html.md b/website/source/docs/vault-integration/index.html.md index a57771090..2d1130908 100644 --- a/website/source/docs/vault-integration/index.html.md +++ b/website/source/docs/vault-integration/index.html.md @@ -24,10 +24,10 @@ install Vault separately from Nomad. Nomad does not run Vault for you. ## Vault Configuration To use the Vault integration, Nomad servers must be provided a Vault token. This -token can either be a root token or a token with permissions to create from a -role. The root token is the easiest way to get started, but we recommend a -role-based token for production installations. Nomad servers will renew the -token automatically. +token can either be a root token or a periodic token with permissions to create +from a token role. The root token is the easiest way to get started, but we +recommend a token role based token for production installations. Nomad servers +will renew the token automatically. 
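Before the server-side details below, a short sketch of what the integration looks like from a job's perspective may help; the policy name and secret path are illustrative assumptions.

```hcl
task "web" {
  driver = "docker"

  config {
    image = "nginx"
  }

  # Ask Nomad to derive a Vault token limited to an assumed policy; Nomad
  # makes the token available to the task and handles renewal.
  vault {
    policies = ["web-secrets"]
  }

  # Render a secret from an assumed Vault path into the task directory.
  template {
    data = <<EOH
{{with secret "secret/web"}}
DB_PASSWORD={{.Data.password}}
{{end}}
EOH
    destination = "local/db.env"
  }
}
```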
### Root Token Integration @@ -36,28 +36,26 @@ token](https://www.vaultproject.io/docs/concepts/tokens.html#root-tokens), no further configuration is needed as Nomad can derive a token for jobs using any Vault policies. -### Role based Integration +### Token Role based Integration Vault's [Token Authentication Backend][auth] supports a concept called "roles". -Roles allow policies to be grouped together and token creation to be delegated -to a trusted service such as Nomad. By creating a role, the set of policies that -tasks managed by Nomad can access may be limited compared to giving Nomad a root -token. Roles allow both whitelist and blacklist management of polcies accessible -to the role. +Token roles allow policies to be grouped together and token creation to be +delegated to a trusted service such as Nomad. By creating a token role, the set +of policies that tasks managed by Nomad can access may be limited compared to +giving Nomad a root token. Token roles allow both white-list and blacklist +management of policies accessible to the role. To configure Nomad and Vault to create tokens against a role, the following must occur: - 1. Create a set of Vault policies that can be used to generate a token for the - Nomad Servers that allow them to create from a role and manage created - tokens within the cluster. The required policies are described below. + 1. Create a "nomad-server" policy used by Nomad to create and manage tokens. - 2. Create a Vault role with the configuration described below. + 2. Create a Vault token role with the configuration described below. - 3. Configure Nomad to use the created role. + 3. Configure Nomad to use the created token role. - 4. Give Nomad servers a token with the policies created from step 1. The token - must also be periodic. + 4. Give Nomad servers a periodic token with the "nomad-server" policy created + above. #### Required Vault Policies @@ -65,14 +63,14 @@ The token Nomad receives must have the capabilities listed below. An explanation for the use of each capability is given. ``` -# Allow creating tokens under "nomad-cluster" role. The role name should be -# updated if "nomad-cluster" is not used. +# Allow creating tokens under "nomad-cluster" token role. The token role name +# should be updated if "nomad-cluster" is not used. path "auth/token/create/nomad-cluster" { capabilities = ["update"] } -# Allow looking up "nomad-cluster" role. The role name should be updated if -# "nomad-cluster" is not used. +# Allow looking up "nomad-cluster" token role. The token role name should be +# updated if "nomad-cluster" is not used. path "auth/token/roles/nomad-cluster" { capabilities = ["read"] } @@ -113,12 +111,12 @@ $ curl https://nomadproject.io/data/vault/nomad-server-policy.hcl -O -s -L $ vault policy-write nomad-server nomad-server-policy.hcl ``` -#### Vault Role Configuration +#### Vault Token Role Configuration -A Vault role must be created for use by Nomad. The role can be used to manage -what Vault policies are accessible by jobs submitted to Nomad. The policies can -be managed as a whitelist by using `allowed_policies` in the role definition or -as a blacklist by using `disallowed_policies`. +A Vault token role must be created for use by Nomad. The token role can be used +to manage what Vault policies are accessible by jobs submitted to Nomad. The +policies can be managed as a whitelist by using `allowed_policies` in the token +role definition or as a blacklist by using `disallowed_policies`. 
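Once the token role has been written to Vault (the creation commands appear later in this section), its effective settings can be read back to confirm the whitelist or blacklist is what you intended. A sketch, assuming the role is named `nomad-cluster`:

```shell
# Read back the token role to confirm its configuration. Assumes the token
# role was created under the name "nomad-cluster".
$ vault read auth/token/roles/nomad-cluster
```

The `nomad-server` policy shown above already grants `read` on this path, so the same token Nomad uses can perform this check.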
 If using `allowed_policies`, task's may only request Vault policies that are in
 the list. If `disallowed_policies` is used, task may request any policy that is
@@ -126,7 +124,7 @@ not in the `disallowed_policies` list. There are tradeoffs to both approaches
 but generally it is easier to use the blacklist approach and add policies that
 you would not like tasks to have access to into the `disallowed_policies` list.
 
-An example role definition is given below:
+An example token role definition is given below:
 
 ```json
 {
@@ -139,51 +137,57 @@ An example role definition is given below:
 }
 ```
 
-##### Role Requirements
+##### Token Role Requirements
 
-Nomad checks that role's have an appropriate configuration for use by the
+Nomad checks that the token role has an appropriate configuration for use by the
 cluster. Fields that are checked are documented below as well as descriptions
 of the important fields. See Vault's [Token Authentication Backend][auth]
 documentation for all possible fields and more complete documentation.
 
 * `allowed_policies` - Specifies the list of allowed policies as a
-  comma-seperated string. This list should contain all policies that jobs running
+  comma-separated string. This list should contain all policies that jobs running
   under Nomad should have access to.
 
-* `disallowed_policies` - Specifies the list of disallowed policies as a
-  comma-seperated string. This list should contain all policies that jobs running
-  under Nomad should **not** have access to. The policy created above that
-  grants Nomad the ability to generate tokens from the role should be included
-  in list of disallowed policies. This prevents tokens created by Nomad from
-  generating new tokens with different policies than those granted by Nomad.
+* `disallowed_policies` - Specifies the list of disallowed policies as a
+  comma-separated string. This list should contain all policies that jobs running
+  under Nomad should **not** have access to. The policy created above that
+  grants Nomad the ability to generate tokens from the token role should be
+  included in the list of disallowed policies. This prevents tokens created by
+  Nomad from generating new tokens with different policies than those granted
+  by Nomad.
 
-* `explicit_max_ttl` - Specifies the max TTL of a token. Must be set to `0` to
+  A regression occurred in Vault 0.6.4 when validating token creation using a
+  token role with `disallowed_policies` such that it is not usable with
+  Nomad. This will be remedied in 0.6.5 and does not affect earlier versions
+  of Vault.
+
+* `explicit_max_ttl` - Specifies the max TTL of a token. **Must be set to `0`** to
   allow periodic tokens.
 
 * `name` - Specifies the name of the policy. We recommend using the name
-  `nomad-cluster`. If a different name is chosen, replace the role in the above
-  policy.
+  `nomad-cluster`. If a different name is chosen, replace the token role in the
+  above policy.
 
-* `orphan` - Specifies whether tokens created againsts this role will be
-  orphaned and have no parents. Must be set to `false`. This ensures that the
+* `orphan` - Specifies whether tokens created against this token role will be
+  orphaned and have no parents. **Must be set to `false`**. This ensures that the
   token can be revoked when the task is no longer needed or a node dies.
 
 * `period` - Specifies the length the TTL is extended by each renewal in
   seconds. It is suggested to set this value on the order of magnitude of 3 days
-  (259200 seconds) to avoid a large renewal request rate to Vault. Must be set
-  to a positive value.
+ (259200 seconds) to avoid a large renewal request rate to Vault. **Must be set + to a positive value**. -* `renewable` - Specifies whether created tokens are renewable. Must be set to - `true`. This allows Nomad to renew tokens for tasks. +* `renewable` - Specifies whether created tokens are renewable. **Must be set to + `true`**. This allows Nomad to renew tokens for tasks. -The above [`nomad-cluster` role](/data/vault/nomad-cluster-role.hcl) is +The above [`nomad-cluster` token role](/data/vault/nomad-cluster-role.hcl) is available for download. Below is an example of writing this role to Vault: ``` -# Download the role +# Download the token role $ curl https://nomadproject.io/data/vault/nomad-cluster-role.json -O -s -L -# Create the role with Vault +# Create the token role with Vault $ vault write /auth/token/roles/nomad-cluster @nomad-cluster-role.json ``` @@ -192,27 +196,27 @@ $ vault write /auth/token/roles/nomad-cluster @nomad-cluster-role.json To make getting started easy, the basic [`nomad-server` policy](/data/vault/nomad-server-policy.hcl) and -[role](/data/vault/nomad-cluster-role.json) described above are available for -download. +[`nomad-cluster` role](/data/vault/nomad-cluster-role.json) described above are +available for download. -The below example assumes Vault is accessible, unsealed and the the operator has +The below example assumes Vault is accessible, unsealed and the operator has appropriate permissions. ```shell -# Download the policy and role +# Download the policy and token role $ curl https://nomadproject.io/data/vault/nomad-server-policy.hcl -O -s -L $ curl https://nomadproject.io/data/vault/nomad-cluster-role.json -O -s -L # Write the policy to Vault $ vault policy-write nomad-server nomad-server-policy.hcl -# Create the role with Vault +# Create the token role with Vault $ vault write /auth/token/roles/nomad-cluster @nomad-cluster-role.json ``` -#### Retrieving the Role based Token +#### Retrieving the Token Role based Token -After the role is created, a token suitable for the Nomad servers may be +After the token role is created, a token suitable for the Nomad servers may be retrieved by issuing the following Vault command: ``` @@ -246,17 +250,14 @@ specification documentation][vault-spec]. ## Troubleshooting Upon startup, Nomad will attempt to connect to the specified Vault server. Nomad -will lookup the passed token and if the token is from a role, the role will be -validated. Nomad will not shutdown if given an invalid Vault token, but will log -the reasons the token is invalid and disable Vault integration. +will lookup the passed token and if the token is from a token role, the token +role will be validated. Nomad will not shutdown if given an invalid Vault token, +but will log the reasons the token is invalid and disable Vault integration. ## Assumptions - Vault 0.6.2 or later is needed. -# XXX -- Nomad is given either a root token or a token created from an approriate role. 
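Putting the pieces together, a server can be pointed at both the periodic token and the token role at startup. The sketch below reuses the placeholder token value and configuration path from the examples above, together with the `-vault-create-from-role` flag described in the agent documentation:

```shell
# Start a Nomad server with the periodic token and the "nomad-cluster" token
# role. The token value and configuration path are placeholders.
$ VAULT_TOKEN=f02f01c2-c0d1-7cb7-6b88-8a14fada58c0 \
    nomad agent -config /path/to/config -vault-create-from-role=nomad-cluster
```

The same settings can instead be placed in the server's [vault block][config] via the [`create_from_role`][createfromrole] option.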
- [auth]: https://www.vaultproject.io/docs/auth/token.html "Vault Authentication Backend" [config]: /docs/agent/configuration/vault.html "Nomad Vault Configuration Block" [createfromrole]: /docs/agent/configuration/vault.html#create_from_role "Nomad vault create_from_role Configuration Flag" From 12e10478b5d5bf167c57f5e41ec3d65e861e1300 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Fri, 27 Jan 2017 15:10:59 -0800 Subject: [PATCH 32/56] small fixes --- website/source/docs/cluster/requirements.html.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/source/docs/cluster/requirements.html.md b/website/source/docs/cluster/requirements.html.md index 7c48089a4..8f22143df 100644 --- a/website/source/docs/cluster/requirements.html.md +++ b/website/source/docs/cluster/requirements.html.md @@ -57,15 +57,15 @@ of having a single "global" region and many datacenter. ## Ports Used -Nomad requires up to 3 different ports to work properly, some on -TCP, UDP, or both protocols. Below we document the requirements for each +Nomad requires 3 different ports to work properly on servers and 2 on clients, +some on TCP, UDP, or both protocols. Below we document the requirements for each port. -* HTTP API (Default 4646). This is used by clients to talk to the HTTP +* HTTP API (Default 4646). This is used by clients and servers to serve the HTTP API. TCP only. -* Server RPC (Default 4647). This is used by servers to handle incoming - requests from other agents. TCP only. +* RPC (Default 4647). This is used by servers and clients to communicate amongst + each other. TCP only. -* Serf WAN (Default 4648). This is used by servers to gossip over the - WAN to other servers. TCP and UDP. +* Serf WAN (Default 4648). This is used by servers to gossip over the WAN to + other servers. TCP and UDP. From 0b3cf611947b3ff24c66aab00c230a7970f19dc7 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Fri, 27 Jan 2017 15:19:22 -0800 Subject: [PATCH 33/56] explain the skip --- nomad/vault_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nomad/vault_test.go b/nomad/vault_test.go index 53063e635..28812f0f1 100644 --- a/nomad/vault_test.go +++ b/nomad/vault_test.go @@ -771,7 +771,7 @@ func TestVaultClient_CreateToken_Blacklist_Role(t *testing.T) { } if strings.Contains(version, "v0.6.4") { - t.SkipNow() + t.Skipf("Vault has a regression in v0.6.4 that this test hits") } v := testutil.NewTestVault(t).Start() From c5f9144e32b94d054b8e292bb696930fae9a5450 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Tue, 24 Jan 2017 16:55:02 -0800 Subject: [PATCH 34/56] Split dev build into its own script The dev build is far simpler than the release build, so move it to its own shell script. This simplifies the release build script slightly as well at the cost of duplicating the version/tag logic. Also don't even try to check for LXC if not running on Linux. I don't think we want to try to support cross-compiling LXC from non-Linux hosts. --- GNUmakefile | 5 ++--- scripts/build-dev.sh | 16 ++++++++++++++++ scripts/build.sh | 43 ++++++++++++++++--------------------------- 3 files changed, 34 insertions(+), 30 deletions(-) create mode 100755 scripts/build-dev.sh diff --git a/GNUmakefile b/GNUmakefile index 117fcf925..b3ad7d3f8 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -14,7 +14,7 @@ GOFILES_NOVENDOR = $(shell find . 
-type f -name '*.go' -not -path "./vendor/*") all: test dev: format generate - @NOMAD_DEV=1 sh -c "'$(PWD)/scripts/build.sh'" + @scripts/build-dev.sh bin: generate @sh -c "'$(PWD)/scripts/build.sh'" @@ -45,8 +45,7 @@ format: generate: @echo "--> Running go generate" @go generate $(PACKAGES) - @sed -e 's|github.com/hashicorp/nomad/vendor/github.com/ugorji/go/codec|github.com/ugorji/go/codec|' nomad/structs/structs.generated.go >> structs.gen.tmp - @mv structs.gen.tmp nomad/structs/structs.generated.go + @sed -i -e 's|github.com/hashicorp/nomad/vendor/github.com/ugorji/go/codec|github.com/ugorji/go/codec|' nomad/structs/structs.generated.go vet: @go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \ diff --git a/scripts/build-dev.sh b/scripts/build-dev.sh new file mode 100755 index 000000000..abc1126e9 --- /dev/null +++ b/scripts/build-dev.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -e + +GIT_COMMIT="$(git rev-parse HEAD)" +GIT_DIRTY="$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)" +LDFLAG="main.GitCommit=${GIT_COMMIT}${GIT_DIRTY}" + +TAGS="nomad_test" +if [[ $(uname) == "Linux" ]]; then + if pkg-config --exists lxc; then + TAGS="$TAGS lxc" + fi +fi + +echo "--> Installing with tags: $TAGS" +go install -ldflags "-X $LDFLAG" -tags "${TAGS}" diff --git a/scripts/build.sh b/scripts/build.sh index 393c1f342..f27623e64 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -29,12 +29,6 @@ rm -f bin/* rm -rf pkg/* mkdir -p bin/ -# If its dev mode, only build for ourself -if [[ "${NOMAD_DEV}" ]]; then - XC_OS=$(go env GOOS) - XC_ARCH=$(go env GOARCH) -fi - # Build! echo "==> Building..." gox \ @@ -47,16 +41,13 @@ gox \ . echo "" -if pkg-config --exists lxc; then - echo "==> Building linux_amd64-lxc..." - go build \ - -tags lxc \ - -ldflags "-X main.GitCommit='${GIT_COMMIT}${GIT_DIRTY}+lxc'" \ - -o "pkg/linux_amd64-lxc/nomad" -else - if [[ "${NOMAD_DEV}" ]]; then - # No lxc in dev mode is no problem - echo "LXC not installed; skipping" +if [[ $(uname) == "Linux" ]]; then + if pkg-config --exists lxc; then + echo "==> Building linux_amd64-lxc..." + go build \ + -tags lxc \ + -ldflags "-X main.GitCommit='${GIT_COMMIT}${GIT_DIRTY}+lxc'" \ + -o "pkg/linux_amd64-lxc/nomad" else # Require LXC for release mode echo "LXC not installed; install lxc-dev to build release binaries" @@ -82,18 +73,16 @@ for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f); do cp ${F} ${MAIN_GOPATH}/bin/ done -if [[ "x${NOMAD_DEV}" == "x" ]]; then - # Zip and copy to the dist dir - echo "==> Packaging..." - for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do - OSARCH=$(basename ${PLATFORM}) - echo "--> ${OSARCH}" +# Zip and copy to the dist dir +echo "==> Packaging..." +for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do + OSARCH=$(basename ${PLATFORM}) + echo "--> ${OSARCH}" - pushd $PLATFORM >/dev/null 2>&1 - zip ../${OSARCH}.zip ./* - popd >/dev/null 2>&1 - done -fi + pushd $PLATFORM >/dev/null 2>&1 + zip ../${OSARCH}.zip ./* + popd >/dev/null 2>&1 +done # Done! 
echo From 2f0550cfc4bc8e8ecc745174a10cee25fcec136f Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Wed, 25 Jan 2017 10:54:08 -0800 Subject: [PATCH 35/56] Cache build deps prior to running tests --- scripts/test.sh | 9 +++++++-- scripts/travis.sh | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/scripts/test.sh b/scripts/test.sh index 097003e76..bc163dd6b 100755 --- a/scripts/test.sh +++ b/scripts/test.sh @@ -1,7 +1,12 @@ #!/usr/bin/env bash set -e -GOTEST_TAGS="nomad_test lxc" +GOTEST_TAGS="nomad_test" +if [[ $(uname) == "Linux" ]]; then + if pkg-config --exists lxc; then + GOTEST_TAGS="$GOTEST_TAGS lxc" + fi +fi # Create a temp dir and clean it up on exit TEMPDIR=`mktemp -d -t nomad-test.XXX` @@ -9,7 +14,7 @@ trap "rm -rf $TEMPDIR" EXIT HUP INT QUIT TERM # Build the Nomad binary for the API tests echo "--> Building nomad" -go build -tags "$GOTEST_TAGS" -o $TEMPDIR/nomad || exit 1 +go build -i -tags "$GOTEST_TAGS" -o $TEMPDIR/nomad || exit 1 # Run the tests echo "--> Running tests" diff --git a/scripts/travis.sh b/scripts/travis.sh index 4bb85a1f2..bc181faae 100755 --- a/scripts/travis.sh +++ b/scripts/travis.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash set -e -export PING_SLEEP=30 +export PING_SLEEP=60 bash -c "while true; do echo \$(date) - building ...; sleep $PING_SLEEP; done" & PING_LOOP_PID=$! From b9da18efc8085018c92a39118f4c5c2473bb3931 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Wed, 25 Jan 2017 11:16:54 -0800 Subject: [PATCH 36/56] Install crosscompile dep to build arm in Vagrant --- Vagrantfile | 2 +- scripts/build.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 23a8de8b8..591c37c14 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -10,7 +10,7 @@ GO_VERSION="1.7.4" # Install Prereq Packages sudo apt-get update -sudo apt-get install -y build-essential curl git-core mercurial bzr libpcre3-dev pkg-config zip default-jre qemu libc6-dev-i386 silversearcher-ag jq htop vim unzip liblxc1 lxc-dev +sudo apt-get install -y build-essential curl git-core mercurial bzr libpcre3-dev pkg-config zip default-jre qemu gcc-4.8-arm-linux-gnueabihf libc6-dev-i386 silversearcher-ag jq htop vim unzip liblxc1 lxc-dev # Setup go, for development of Nomad SRCROOT="/opt/go" diff --git a/scripts/build.sh b/scripts/build.sh index f27623e64..42f776821 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -19,7 +19,7 @@ GIT_DIRTY="$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)" # XC_ARCH=${XC_ARCH:-"386 amd64"} # XC_OS=${XC_OS:-linux} -XC_ARCH=${XC_ARCH:-"386 amd64"} +XC_ARCH=${XC_ARCH:-"386 amd64 arm arm64"} XC_OS=${XC_OS:-"linux"} XC_EXCLUDE=${XC_EXCLUDE:-"!darwin/arm !darwin/386"} From d84660a047c3ad7dbb49661fc194b80d61ec95f8 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Thu, 26 Jan 2017 16:42:17 -0800 Subject: [PATCH 37/56] Split out massive list of cross compile deps+cmds --- Vagrantfile | 15 ++++++++++-- scripts/build.sh | 60 ++++++++++++++++++++++-------------------------- 2 files changed, 41 insertions(+), 34 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 591c37c14..b1bb330f9 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -8,9 +8,20 @@ DEFAULT_CPU_COUNT = 2 $script = <