fixed conflicts

Diptanu Choudhury
2017-01-31 13:20:58 -08:00
58 changed files with 1303 additions and 564 deletions


@@ -1,9 +1,10 @@
## 0.5.3 (Unreleased)
## 0.5.3 (January 30, 2017)
IMPROVEMENTS:
* api: Added APIs for requesting GC of allocations [GH-2192]
* core: Introduce Parameterized Jobs and Dispatch command/API [GH-2128]
* core: Introduce parameterized jobs and dispatch command/API [GH-2128]
* core: Cancel blocked evals upon successful one for job [GH-2155]
* api: Added APIs for requesting GC of allocations [GH-2192]
* api: Job summary endpoint includes summary status for child jobs [GH-2128]
* api/client: Plain text log streaming suitable for viewing logs in a browser
[GH-2235]
* cli: Defaulting to showing allocations which belong to currently registered


@@ -14,7 +14,7 @@ GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*")
all: test
dev: format generate
@NOMAD_DEV=1 sh -c "'$(PWD)/scripts/build.sh'"
@scripts/build-dev.sh
bin: generate
@sh -c "'$(PWD)/scripts/build.sh'"
@@ -45,8 +45,7 @@ format:
generate:
@echo "--> Running go generate"
@go generate $(PACKAGES)
@sed -e 's|github.com/hashicorp/nomad/vendor/github.com/ugorji/go/codec|github.com/ugorji/go/codec|' nomad/structs/structs.generated.go >> structs.gen.tmp
@mv structs.gen.tmp nomad/structs/structs.generated.go
@sed -i.old -e 's|github.com/hashicorp/nomad/vendor/github.com/ugorji/go/codec|github.com/ugorji/go/codec|' nomad/structs/structs.generated.go
vet:
@go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \

Vagrantfile

@@ -6,11 +6,21 @@ VAGRANTFILE_API_VERSION = "2"
DEFAULT_CPU_COUNT = 2
$script = <<SCRIPT
GO_VERSION="1.7.4"
GO_VERSION="1.7.5"
# Install Prereq Packages
export DEBIAN_FRONTEND=noninteractive
sudo dpkg --add-architecture i386
sudo apt-get update
sudo apt-get install -y build-essential curl git-core mercurial bzr libpcre3-dev pkg-config zip default-jre qemu libc6-dev-i386 silversearcher-ag jq htop vim unzip liblxc1 lxc-dev
# Install base dependencies
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential curl git-core mercurial bzr \
libpcre3-dev pkg-config zip default-jre qemu silversearcher-ag \
jq htop vim unzip tree \
liblxc1 lxc-dev lxc-templates \
gcc-5-aarch64-linux-gnu binutils-aarch64-linux-gnu \
libc6-dev-i386 linux-libc-dev:i386 \
gcc-5-arm-linux-gnueabi gcc-5-multilib-arm-linux-gnueabi binutils-arm-linux-gnueabi
# Setup go, for development of Nomad
SRCROOT="/opt/go"
@@ -20,18 +30,22 @@ SRCPATH="/opt/gopath"
ARCH=`uname -m | sed 's|i686|386|' | sed 's|x86_64|amd64|'`
# Install Go
cd /tmp
wget -q https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${ARCH}.tar.gz
tar -xf go${GO_VERSION}.linux-${ARCH}.tar.gz
sudo mv go $SRCROOT
sudo chmod 775 $SRCROOT
sudo chown vagrant:vagrant $SRCROOT
if [[ $(go version) == "go version go${GO_VERSION} linux/${ARCH}" ]]; then
echo "Go ${GO_VERSION} ${ARCH} already installed; Skipping"
else
cd /tmp
wget -q https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${ARCH}.tar.gz
tar -xf go${GO_VERSION}.linux-${ARCH}.tar.gz
sudo mv go $SRCROOT
sudo chmod 775 $SRCROOT
sudo chown ubuntu:ubuntu $SRCROOT
fi
# Setup the GOPATH; even though the shared folder spec gives the working
# directory the right user/group, we need to set it properly on the
# parent path to allow subsequent "go get" commands to work.
sudo mkdir -p $SRCPATH
sudo chown -R vagrant:vagrant $SRCPATH 2>/dev/null || true
sudo chown -R ubuntu:ubuntu $SRCPATH 2>/dev/null || true
# ^^ silencing errors here because we expect this to fail for the shared folder
cat <<EOF >/tmp/gopath.sh
@@ -44,16 +58,20 @@ sudo chmod 0755 /etc/profile.d/gopath.sh
source /etc/profile.d/gopath.sh
# Install Docker
echo deb https://apt.dockerproject.org/repo ubuntu-`lsb_release -c | awk '{print $2}'` main | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
sudo apt-get update
sudo apt-get install -y docker-engine
if [[ -f /etc/apt/sources.list.d/docker.list ]]; then
echo "Docker repository already installed; Skipping"
else
echo deb https://apt.dockerproject.org/repo ubuntu-`lsb_release -c | awk '{print $2}'` main | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
sudo apt-get update
fi
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y docker-engine
# Restart docker to make sure we get the latest version of the daemon if there is an upgrade
sudo service docker restart
# Make sure we can actually use docker as the vagrant user
sudo usermod -aG docker vagrant
# Make sure we can actually use docker as the ubuntu user
sudo usermod -aG docker ubuntu
# Setup Nomad for development
cd /opt/gopath/src/github.com/hashicorp/nomad && make bootstrap
@@ -75,7 +93,7 @@ def configureVM(vmCfg, vmParams={
numCPUs: DEFAULT_CPU_COUNT,
}
)
vmCfg.vm.box = "cbednarski/ubuntu-1404"
vmCfg.vm.box = "bento/ubuntu-16.04" # 16.04 LTS
vmCfg.vm.provision "shell", inline: $script, privileged: false
vmCfg.vm.synced_folder '.', '/opt/gopath/src/github.com/hashicorp/nomad'
@@ -87,7 +105,6 @@ def configureVM(vmCfg, vmParams={
memory = 2048
vmCfg.vm.provider "parallels" do |p, o|
o.vm.box = "parallels/ubuntu-14.04"
p.memory = memory
p.cpus = cpus
end
@@ -99,6 +116,7 @@ def configureVM(vmCfg, vmParams={
["vmware_fusion", "vmware_workstation"].each do |p|
vmCfg.vm.provider p do |v|
v.enable_vmrun_ip_lookup = false
v.gui = false
v.memory = memory
v.cpus = cpus


@@ -141,28 +141,28 @@ type LogConfig struct {
MaxFileSizeMB int
}
// DispatchInputConfig configures how a task gets its input from a job dispatch
type DispatchInputConfig struct {
// DispatchPayloadConfig configures how a task gets its input from a job dispatch
type DispatchPayloadConfig struct {
File string
}
// Task is a single process in a task group.
type Task struct {
Name string
Driver string
User string
Config map[string]interface{}
Constraints []*Constraint
Env map[string]string
Services []Service
Resources *Resources
Meta map[string]string
KillTimeout time.Duration
LogConfig *LogConfig
Artifacts []*TaskArtifact
Vault *Vault
Templates []*Template
DispatchInput *DispatchInputConfig
Name string
Driver string
User string
Config map[string]interface{}
Constraints []*Constraint
Env map[string]string
Services []Service
Resources *Resources
Meta map[string]string
KillTimeout time.Duration
LogConfig *LogConfig
Artifacts []*TaskArtifact
Vault *Vault
Templates []*Template
DispatchPayload *DispatchPayloadConfig
}
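For illustration only (not part of this diff), a minimal sketch of populating the renamed field through the api package; the task values and the `input.json` file name are made up:

```
// Sketch: the dispatch payload will be written to local/input.json
// inside the task directory when the job is dispatched.
task := &Task{
	Name:            "bar",
	Driver:          "docker",
	DispatchPayload: &DispatchPayloadConfig{File: "input.json"},
}
```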
// TaskArtifact is used to download artifacts before running a task.


@@ -104,6 +104,10 @@ func (r *CreatedResources) Remove(k, needle string) bool {
// Copy returns a new deep copy of CreatedResources.
func (r *CreatedResources) Copy() *CreatedResources {
if r == nil {
return nil
}
newr := CreatedResources{
Resources: make(map[string][]string, len(r.Resources)),
}
@@ -165,7 +169,8 @@ type Driver interface {
Open(ctx *ExecContext, handleID string) (DriverHandle, error)
// Cleanup is called to remove resources which were created for a task
// and no longer needed.
// and no longer needed. Cleanup is not called if CreatedResources is
// nil.
//
// If Cleanup returns a recoverable error it may be retried. On retry
// it will be passed the same CreatedResources, so all successfully
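To make that contract concrete, a hypothetical driver's Cleanup might look like the following (not from this commit; `removeResource` and the recoverable-error constructor are assumptions):

```
// Hypothetical sketch of the Cleanup contract described above.
func (d *exampleDriver) Cleanup(ctx *ExecContext, res *CreatedResources) error {
	// res is never nil here: the runner skips Cleanup when CreatedResources is nil.
	for key, ids := range res.Resources {
		var failed []string
		for _, id := range ids {
			if err := d.removeResource(key, id); err != nil { // assumed helper
				failed = append(failed, id)
			}
		}
		if len(failed) == 0 {
			delete(res.Resources, key) // fully cleaned; safe during range
			continue
		}
		// Keep only the failed ids so a retry sees just those, and signal
		// the runner that retrying with the same CreatedResources may succeed.
		res.Resources[key] = failed
		return structs.NewRecoverableError(
			fmt.Errorf("failed removing %d %q resources", len(failed), key), true)
	}
	return nil
}
```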


@@ -158,6 +158,7 @@ func setupTaskEnv(t *testing.T, driver string) (*allocdir.TaskDir, map[string]st
alloc := mock.Alloc()
alloc.Job.TaskGroups[0].Tasks[0] = task
alloc.Name = "Bar"
alloc.TaskResources["web"].Networks[0].DynamicPorts[0].Value = 2000
conf := testConfig()
allocDir := allocdir.NewAllocDir(testLogger(), filepath.Join(conf.AllocDir, alloc.ID))
taskDir := allocDir.NewTaskDir(task.Name)


@@ -0,0 +1,56 @@
package executor
import (
"log"
"os"
"strings"
"testing"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
"github.com/hashicorp/nomad/client/testutil"
)
func TestExecScriptCheckWithIsolation(t *testing.T) {
testutil.ExecCompatible(t)
execCmd := ExecCommand{Cmd: "/bin/echo", Args: []string{"hello world"}}
ctx, allocDir := testExecutorContextWithChroot(t)
defer allocDir.Destroy()
execCmd.FSIsolation = true
execCmd.ResourceLimits = true
execCmd.User = dstructs.DefaultUnpriviledgedUser
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
if err := executor.SetContext(ctx); err != nil {
t.Fatalf("Unexpected error")
}
_, err := executor.LaunchCmd(&execCmd)
if err != nil {
t.Fatalf("error in launching command: %v", err)
}
check := &ExecScriptCheck{
id: "foo",
cmd: "/bin/echo",
args: []string{"hello", "world"},
taskDir: ctx.TaskDir,
FSIsolation: true,
}
res := check.Run()
expectedOutput := "hello world"
expectedExitCode := 0
if res.Err != nil {
t.Fatalf("err: %v", res.Err)
}
if strings.TrimSpace(res.Output) != expectedOutput {
t.Fatalf("output expected: %v, actual: %v", expectedOutput, res.Output)
}
if res.ExitCode != expectedExitCode {
t.Fatalf("exitcode expected: %v, actual: %v", expectedExitCode, res.ExitCode)
}
}


@@ -9,7 +9,6 @@ import (
docker "github.com/fsouza/go-dockerclient"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
"github.com/hashicorp/nomad/client/testutil"
)
@@ -37,51 +36,6 @@ func TestExecScriptCheckNoIsolation(t *testing.T) {
}
}
func TestExecScriptCheckWithIsolation(t *testing.T) {
testutil.ExecCompatible(t)
execCmd := ExecCommand{Cmd: "/bin/echo", Args: []string{"hello world"}}
ctx, allocDir := testExecutorContextWithChroot(t)
defer allocDir.Destroy()
execCmd.FSIsolation = true
execCmd.ResourceLimits = true
execCmd.User = dstructs.DefaultUnpriviledgedUser
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
if err := executor.SetContext(ctx); err != nil {
t.Fatalf("Unexpected error")
}
_, err := executor.LaunchCmd(&execCmd)
if err != nil {
t.Fatalf("error in launching command: %v", err)
}
check := &ExecScriptCheck{
id: "foo",
cmd: "/bin/echo",
args: []string{"hello", "world"},
taskDir: ctx.TaskDir,
FSIsolation: true,
}
res := check.Run()
expectedOutput := "hello world"
expectedExitCode := 0
if res.Err != nil {
t.Fatalf("err: %v", res.Err)
}
if strings.TrimSpace(res.Output) != expectedOutput {
t.Fatalf("output expected: %v, actual: %v", expectedOutput, res.Output)
}
if res.ExitCode != expectedExitCode {
t.Fatalf("exitcode expected: %v, actual: %v", expectedExitCode, res.ExitCode)
}
}
func TestDockerScriptCheck(t *testing.T) {
if !testutil.DockerIsConnected(t) {
return


@@ -8,6 +8,7 @@ import (
"strconv"
"strings"
"testing"
"time"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/driver/env"
@@ -136,3 +137,54 @@ ld.so.conf.d/`
t.Fatalf("Command output incorrectly: want %v; got %v", expected, act)
}
}
func TestExecutor_ClientCleanup(t *testing.T) {
testutil.ExecCompatible(t)
ctx, allocDir := testExecutorContextWithChroot(t)
ctx.Task.LogConfig.MaxFiles = 1
ctx.Task.LogConfig.MaxFileSizeMB = 300
defer allocDir.Destroy()
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
if err := executor.SetContext(ctx); err != nil {
t.Fatalf("Unexpected error")
}
// Need to run a command which will produce continuous output but not
// too quickly to ensure executor.Exit() stops the process.
execCmd := ExecCommand{Cmd: "/bin/bash", Args: []string{"-c", "while true; do /bin/echo X; /bin/sleep 1; done"}}
execCmd.FSIsolation = true
execCmd.ResourceLimits = true
execCmd.User = "nobody"
ps, err := executor.LaunchCmd(&execCmd)
if err != nil {
t.Fatalf("error in launching command: %v", err)
}
if ps.Pid == 0 {
t.Fatalf("expected process to start and have non zero pid")
}
time.Sleep(500 * time.Millisecond)
if err := executor.Exit(); err != nil {
t.Fatalf("err: %v", err)
}
file := filepath.Join(ctx.LogDir, "web.stdout.0")
finfo, err := os.Stat(file)
if err != nil {
t.Fatalf("error stating stdout file: %v", err)
}
if finfo.Size() == 0 {
t.Fatal("Nothing in stdout; expected at least one byte.")
}
time.Sleep(2 * time.Second)
finfo1, err := os.Stat(file)
if err != nil {
t.Fatalf("error stating stdout file: %v", err)
}
if finfo.Size() != finfo1.Size() {
t.Fatalf("Expected size: %v, actual: %v", finfo.Size(), finfo1.Size())
}
}


@@ -14,7 +14,6 @@ import (
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/driver/env"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
tu "github.com/hashicorp/nomad/testutil"
@@ -187,57 +186,6 @@ func TestExecutor_WaitExitSignal(t *testing.T) {
}
}
func TestExecutor_ClientCleanup(t *testing.T) {
testutil.ExecCompatible(t)
ctx, allocDir := testExecutorContextWithChroot(t)
ctx.Task.LogConfig.MaxFiles = 1
ctx.Task.LogConfig.MaxFileSizeMB = 300
defer allocDir.Destroy()
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
if err := executor.SetContext(ctx); err != nil {
t.Fatalf("Unexpected error")
}
// Need to run a command which will produce continuous output but not
// too quickly to ensure executor.Exit() stops the process.
execCmd := ExecCommand{Cmd: "/bin/bash", Args: []string{"-c", "while true; do /bin/echo X; /bin/sleep 1; done"}}
execCmd.FSIsolation = true
execCmd.ResourceLimits = true
execCmd.User = "nobody"
ps, err := executor.LaunchCmd(&execCmd)
if err != nil {
t.Fatalf("error in launching command: %v", err)
}
if ps.Pid == 0 {
t.Fatalf("expected process to start and have non zero pid")
}
time.Sleep(500 * time.Millisecond)
if err := executor.Exit(); err != nil {
t.Fatalf("err: %v", err)
}
file := filepath.Join(ctx.LogDir, "web.stdout.0")
finfo, err := os.Stat(file)
if err != nil {
t.Fatalf("error stating stdout file: %v", err)
}
if finfo.Size() == 0 {
t.Fatal("Nothing in stdout; expected at least one byte.")
}
time.Sleep(2 * time.Second)
finfo1, err := os.Stat(file)
if err != nil {
t.Fatalf("error stating stdout file: %v", err)
}
if finfo.Size() != finfo1.Size() {
t.Fatalf("Expected size: %v, actual: %v", finfo.Size(), finfo1.Size())
}
}
func TestExecutor_Start_Kill(t *testing.T) {
execCmd := ExecCommand{Cmd: "/bin/sleep", Args: []string{"10 && hello world"}}
ctx, allocDir := testExecutorContext(t)


@@ -131,6 +131,10 @@ func (m *MockDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle,
// Config.Options["cleanup_fail_num"] times. For failures it will return a
// recoverable error.
func (m *MockDriver) Cleanup(ctx *ExecContext, res *CreatedResources) error {
if res == nil {
panic("Cleanup should not be called with nil *CreatedResources")
}
var err error
failn, _ := strconv.Atoi(m.config.Options["cleanup_fail_num"])
failk := m.config.Options["cleanup_fail_on"]


@@ -753,9 +753,9 @@ func (r *TaskRunner) prestart(resultCh chan bool) {
// If the job is a dispatch job and there is a payload write it to disk
requirePayload := len(r.alloc.Job.Payload) != 0 &&
(r.task.DispatchInput != nil && r.task.DispatchInput.File != "")
(r.task.DispatchPayload != nil && r.task.DispatchPayload.File != "")
if !r.payloadRendered && requirePayload {
renderTo := filepath.Join(r.taskDir.LocalDir, r.task.DispatchInput.File)
renderTo := filepath.Join(r.taskDir.LocalDir, r.task.DispatchPayload.File)
decoded, err := snappy.Decode(nil, r.alloc.Job.Payload)
if err != nil {
r.setState(
@@ -1033,6 +1033,11 @@ func (r *TaskRunner) cleanup() {
res := r.createdResources.Copy()
r.createdResourcesLock.Unlock()
if res == nil {
// No created resources to cleanup
return
}
ctx := driver.NewExecContext(r.taskDir, r.alloc.ID)
attempts := 1
var cleanupErr error


@@ -111,6 +111,28 @@ func testTaskRunnerFromAlloc(t *testing.T, restarts bool, alloc *structs.Allocat
return &taskRunnerTestCtx{upd, tr, allocDir}
}
// testWaitForTaskToStart waits for the task to start or fails the test
func testWaitForTaskToStart(t *testing.T, ctx *taskRunnerTestCtx) {
// Wait for the task to start
testutil.WaitForResult(func() (bool, error) {
if l := len(ctx.upd.events); l < 2 {
return false, fmt.Errorf("Expect two events; got %v", l)
}
if ctx.upd.events[0].Type != structs.TaskReceived {
return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
}
if ctx.upd.events[1].Type != structs.TaskStarted {
return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskStarted)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
}
func TestTaskRunner_SimpleRun(t *testing.T) {
ctestutil.ExecCompatible(t)
ctx := testTaskRunner(t, false)
@@ -195,23 +217,8 @@ func TestTaskRunner_Destroy(t *testing.T) {
ctx.tr.task.Config["args"] = []string{"1000"}
go ctx.tr.Run()
testutil.WaitForResult(func() (bool, error) {
if l := len(ctx.upd.events); l != 2 {
return false, fmt.Errorf("Expect two events; got %v", l)
}
if ctx.upd.events[0].Type != structs.TaskReceived {
return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
}
if ctx.upd.events[1].Type != structs.TaskStarted {
return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskStarted)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
// Wait for the task to start
testWaitForTaskToStart(t, ctx)
// Make sure we are collecting a few stats
time.Sleep(2 * time.Second)
@@ -282,7 +289,7 @@ func TestTaskRunner_Update(t *testing.T) {
ctx.tr.Update(updateAlloc)
// Wait for ctx.upd.te to take place
// Wait for ctx.update to take place
testutil.WaitForResult(func() (bool, error) {
if ctx.tr.task == newTask {
return false, fmt.Errorf("We copied the pointer! This would be very bad")
@@ -321,23 +328,7 @@ func TestTaskRunner_SaveRestoreState(t *testing.T) {
defer ctx.Cleanup()
// Wait for the task to be running and then snapshot the state
testutil.WaitForResult(func() (bool, error) {
if l := len(ctx.upd.events); l != 2 {
return false, fmt.Errorf("Expect two events; got %v", l)
}
if ctx.upd.events[0].Type != structs.TaskReceived {
return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
}
if ctx.upd.events[1].Type != structs.TaskStarted {
return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskStarted)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
testWaitForTaskToStart(t, ctx)
if err := ctx.tr.SaveState(); err != nil {
t.Fatalf("err: %v", err)
@@ -509,6 +500,7 @@ func TestTaskRunner_Download_Retries(t *testing.T) {
}
func TestTaskRunner_Validate_UserEnforcement(t *testing.T) {
ctestutil.ExecCompatible(t)
ctx := testTaskRunner(t, false)
defer ctx.Cleanup()
@@ -544,7 +536,7 @@ func TestTaskRunner_RestartTask(t *testing.T) {
task.Driver = "mock_driver"
task.Config = map[string]interface{}{
"exit_code": "0",
"run_for": "10s",
"run_for": "100s",
}
ctx := testTaskRunnerFromAlloc(t, true, alloc)
@@ -552,11 +544,25 @@ func TestTaskRunner_RestartTask(t *testing.T) {
go ctx.tr.Run()
defer ctx.Cleanup()
// Wait for it to start
go func() {
time.Sleep(time.Duration(testutil.TestMultiplier()*300) * time.Millisecond)
testWaitForTaskToStart(t, ctx)
ctx.tr.Restart("test", "restart")
time.Sleep(time.Duration(testutil.TestMultiplier()*300) * time.Millisecond)
ctx.tr.Kill("test", "restart", false)
// Wait for it to restart then kill
go func() {
// Wait for the task to start again
testutil.WaitForResult(func() (bool, error) {
if len(ctx.upd.events) != 7 {
t.Fatalf("task %q in alloc %q should have 7 ctx.updates: %#v", task.Name, alloc.ID, ctx.upd.events)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
ctx.tr.Kill("test", "restart", false)
}()
}()
select {
@@ -566,7 +572,7 @@ func TestTaskRunner_RestartTask(t *testing.T) {
}
if len(ctx.upd.events) != 9 {
t.Fatalf("should have 9 ctx.upd.tes: %#v", ctx.upd.events)
t.Fatalf("should have 9 ctx.updates: %#v", ctx.upd.events)
}
if ctx.upd.state != structs.TaskStateDead {
@@ -593,7 +599,6 @@ func TestTaskRunner_RestartTask(t *testing.T) {
t.Fatalf("Fifth Event was %v; want %v", ctx.upd.events[4].Type, structs.TaskKilled)
}
t.Logf("%+v", ctx.upd.events[5])
if ctx.upd.events[5].Type != structs.TaskRestarting {
t.Fatalf("Sixth Event was %v; want %v", ctx.upd.events[5].Type, structs.TaskRestarting)
}
@@ -625,7 +630,7 @@ func TestTaskRunner_KillTask(t *testing.T) {
defer ctx.Cleanup()
go func() {
time.Sleep(100 * time.Millisecond)
testWaitForTaskToStart(t, ctx)
ctx.tr.Kill("test", "kill", true)
}()
@@ -680,23 +685,7 @@ func TestTaskRunner_SignalFailure(t *testing.T) {
defer ctx.Cleanup()
// Wait for the task to start
testutil.WaitForResult(func() (bool, error) {
if l := len(ctx.upd.events); l < 2 {
return false, fmt.Errorf("Expect two events; got %v", l)
}
if ctx.upd.events[0].Type != structs.TaskReceived {
return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
}
if ctx.upd.events[1].Type != structs.TaskStarted {
return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskStarted)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
testWaitForTaskToStart(t, ctx)
if err := ctx.tr.Signal("test", "test", syscall.SIGINT); err == nil {
t.Fatalf("Didn't receive error")
@@ -1125,23 +1114,7 @@ func TestTaskRunner_VaultManager_Restart(t *testing.T) {
go ctx.tr.Run()
// Wait for the task to start
testutil.WaitForResult(func() (bool, error) {
if l := len(ctx.upd.events); l != 2 {
return false, fmt.Errorf("Expect two events; got %v", l)
}
if ctx.upd.events[0].Type != structs.TaskReceived {
return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
}
if ctx.upd.events[1].Type != structs.TaskStarted {
return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskStarted)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
testWaitForTaskToStart(t, ctx)
// Error the token renewal
vc := ctx.tr.vaultClient.(*vaultclient.MockVaultClient)
@@ -1213,23 +1186,7 @@ func TestTaskRunner_VaultManager_Signal(t *testing.T) {
defer ctx.Cleanup()
// Wait for the task to start
testutil.WaitForResult(func() (bool, error) {
if l := len(ctx.upd.events); l != 2 {
return false, fmt.Errorf("Expect two events; got %v", l)
}
if ctx.upd.events[0].Type != structs.TaskReceived {
return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
}
if ctx.upd.events[1].Type != structs.TaskStarted {
return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskStarted)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
testWaitForTaskToStart(t, ctx)
// Error the token renewal
vc := ctx.tr.vaultClient.(*vaultclient.MockVaultClient)
@@ -1275,7 +1232,7 @@ func TestTaskRunner_SimpleRun_Dispatch(t *testing.T) {
"run_for": "1s",
}
fileName := "test"
task.DispatchInput = &structs.DispatchInputConfig{
task.DispatchPayload = &structs.DispatchPayloadConfig{
File: fileName,
}
alloc.Job.ParameterizedJob = &structs.ParameterizedJobConfig{}
@@ -1328,6 +1285,27 @@ func TestTaskRunner_SimpleRun_Dispatch(t *testing.T) {
}
}
// TestTaskRunner_CleanupNil ensures TaskRunner doesn't call Driver.Cleanup if
// no resources were created.
func TestTaskRunner_CleanupNil(t *testing.T) {
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
ctx := testTaskRunnerFromAlloc(t, false, alloc)
ctx.tr.MarkReceived()
ctx.tr.createdResources = nil
defer ctx.Cleanup()
ctx.tr.Run()
// Cleanup should not have been called, so createdResources should still be nil
if ctx.tr.createdResources != nil {
t.Fatalf("createdResources should still be nil: %v", ctx.tr.createdResources)
}
}
func TestTaskRunner_CleanupOK(t *testing.T) {
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]


@@ -120,6 +120,7 @@ func (c *Command) readConfig() *Config {
}), "vault-allow-unauthenticated", "")
flags.StringVar(&cmdConfig.Vault.Token, "vault-token", "", "")
flags.StringVar(&cmdConfig.Vault.Addr, "vault-address", "", "")
flags.StringVar(&cmdConfig.Vault.Role, "vault-create-from-role", "", "")
flags.StringVar(&cmdConfig.Vault.TLSCaFile, "vault-ca-file", "", "")
flags.StringVar(&cmdConfig.Vault.TLSCaPath, "vault-ca-path", "", "")
flags.StringVar(&cmdConfig.Vault.TLSCertFile, "vault-cert-file", "", "")
@@ -934,6 +935,9 @@ Vault Options:
This only needs to be set on Servers. Overrides the Vault token read from
the VAULT_TOKEN environment variable.
-vault-create-from-role=<role>
The role name to create tokens for tasks from.
-vault-allow-unauthenticated
Whether to allow jobs to be submitted that request Vault tokens but do not
authenticate. The flag only applies to Servers.
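As a usage sketch for the new flag (token and role values are placeholders, and Vault must already be enabled in the agent's config file):

```
$ nomad agent -server -config=server.hcl \
    -vault-token=$VAULT_TOKEN \
    -vault-create-from-role=nomad-cluster
```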


@@ -32,6 +32,13 @@ General Options:
Dispatch Options:
-meta <key>=<value>
Meta takes a key/value pair separated by "=". The metadata key will be
merged into the job's metadata. The job may define a default value for the
key which is overridden when dispatching. The flag can be provided more than
once to inject multiple metadata key/value pairs. Arbitrary keys are not
allowed. The parameterized job must allow the key to be merged.
-detach
Return immediately instead of entering monitor mode. After job dispatch,
the evaluation ID will be printed to the screen, which can be used to
@@ -44,7 +51,7 @@ Dispatch Options:
}
func (c *JobDispatchCommand) Synopsis() string {
return "Dispatch an instance of a parametereized job"
return "Dispatch an instance of a parameterized job"
}
func (c *JobDispatchCommand) Run(args []string) int {


@@ -4,7 +4,7 @@
$script = <<SCRIPT
# Update apt and get dependencies
sudo apt-get update
sudo apt-get install -y unzip curl wget vim
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y unzip curl wget vim
# Download Nomad
echo Fetching Nomad...
@@ -25,7 +25,7 @@ sudo sed -i -e "s/.*nomad.*/$(ip route get 1 | awk '{print $NF;exit}') nomad/" /
SCRIPT
Vagrant.configure(2) do |config|
config.vm.box = "puphpet/ubuntu1404-x64"
config.vm.box = "ubuntu/xenial64" # 16.04 LTS
config.vm.hostname = "nomad"
config.vm.provision "shell", inline: $script, privileged: false
config.vm.provision "docker" # Just install it


@@ -564,7 +564,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
"artifact",
"config",
"constraint",
"dispatch_input",
"dispatch_payload",
"driver",
"env",
"kill_timeout",
@@ -587,7 +587,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
delete(m, "artifact")
delete(m, "config")
delete(m, "constraint")
delete(m, "dispatch_input")
delete(m, "dispatch_payload")
delete(m, "env")
delete(m, "logs")
delete(m, "meta")
@@ -747,10 +747,10 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
t.Vault = v
}
// If we have a dispatch_input block parse that
if o := listVal.Filter("dispatch_input"); len(o.Items) > 0 {
// If we have a dispatch_payload block parse that
if o := listVal.Filter("dispatch_payload"); len(o.Items) > 0 {
if len(o.Items) > 1 {
return fmt.Errorf("only one dispatch_input block is allowed in a task. Number of dispatch_input blocks found: %d", len(o.Items))
return fmt.Errorf("only one dispatch_payload block is allowed in a task. Number of dispatch_payload blocks found: %d", len(o.Items))
}
var m map[string]interface{}
dispatchBlock := o.Items[0]
@@ -760,15 +760,15 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
"file",
}
if err := checkHCLKeys(dispatchBlock.Val, valid); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s', dispatch_input ->", n))
return multierror.Prefix(err, fmt.Sprintf("'%s', dispatch_payload ->", n))
}
if err := hcl.DecodeObject(&m, dispatchBlock.Val); err != nil {
return err
}
t.DispatchInput = &structs.DispatchInputConfig{}
if err := mapstructure.WeakDecode(m, t.DispatchInput); err != nil {
t.DispatchPayload = &structs.DispatchPayloadConfig{}
if err := mapstructure.WeakDecode(m, t.DispatchPayload); err != nil {
return err
}
}
@@ -1259,12 +1259,11 @@ func parseParameterizedJob(result **structs.ParameterizedJobConfig, list *ast.Ob
return err
}
delete(m, "meta")
// Check for invalid keys
valid := []string{
"payload",
"meta_keys",
"meta_required",
"meta_optional",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return err
@@ -1276,37 +1275,6 @@ func parseParameterizedJob(result **structs.ParameterizedJobConfig, list *ast.Ob
return err
}
var listVal *ast.ObjectList
if ot, ok := o.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return fmt.Errorf("parameterized block should be an object")
}
// Parse the meta block
if metaList := listVal.Filter("meta_keys"); len(metaList.Items) > 0 {
// Get our resource object
o := metaList.Items[0]
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return err
}
// Check for invalid keys
valid := []string{
"optional",
"required",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return err
}
if err := mapstructure.WeakDecode(m, &d); err != nil {
return err
}
}
*result = &d
return nil
}


@@ -571,7 +571,7 @@ func TestParse(t *testing.T) {
MaxFiles: 10,
MaxFileSizeMB: 10,
},
DispatchInput: &structs.DispatchInputConfig{
DispatchPayload: &structs.DispatchPayloadConfig{
File: "foo/bar",
},
},


@@ -1,17 +1,15 @@
job "parameterized_job" {
parameterized {
payload = "required"
meta_keys {
required = ["foo", "bar"]
optional = ["baz", "bam"]
}
meta_required = ["foo", "bar"]
meta_optional = ["baz", "bam"]
}
group "foo" {
task "bar" {
driver = "docker"
resources {}
dispatch_input {
dispatch_payload {
file = "foo/bar"
}
}


@@ -192,6 +192,8 @@ func (c *CoreScheduler) evalGC(eval *structs.Evaluation) error {
// The Evaluation GC should not handle batch jobs since those need to be
// garbage collected in one shot
// XXX believe there is a bug that if a batch job gets stopped, there is no
// way for it to GC the eval/allocs
gc, allocs, err := c.gcEval(eval, oldThreshold, false)
if err != nil {
return err


@@ -965,6 +965,66 @@ func TestCoreScheduler_JobGC_Force(t *testing.T) {
}
}
// This test ensures parameterized and periodic jobs don't get GCd
func TestCoreScheduler_JobGC_NonGCable(t *testing.T) {
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
// Insert a parameterized job.
state := s1.fsm.State()
job := mock.Job()
job.Type = structs.JobTypeBatch
job.Status = structs.JobStatusRunning
job.ParameterizedJob = &structs.ParameterizedJobConfig{
Payload: structs.DispatchPayloadRequired,
}
err := state.UpsertJob(1000, job)
if err != nil {
t.Fatalf("err: %v", err)
}
// Insert a periodic job.
job2 := mock.PeriodicJob()
if err := state.UpsertJob(1001, job2); err != nil {
t.Fatalf("err: %v", err)
}
// Create a core scheduler
snap, err := state.Snapshot()
if err != nil {
t.Fatalf("err: %v", err)
}
core := NewCoreScheduler(s1, snap)
// Attempt the GC
gc := s1.coreJobEval(structs.CoreJobForceGC, 1002)
err = core.Process(gc)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should still exist
out, err := state.JobByID(job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("bad: %v", out)
}
outE, err := state.JobByID(job2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if outE == nil {
t.Fatalf("bad: %v", outE)
}
}
func TestCoreScheduler_PartitionReap(t *testing.T) {
s1 := testServer(t, nil)
defer s1.Shutdown()


@@ -70,7 +70,7 @@ func TestResetHeartbeatTimerLocked(t *testing.T) {
t.Fatalf("missing timer")
}
time.Sleep(10 * time.Millisecond)
time.Sleep(time.Duration(testutil.TestMultiplier()*10) * time.Millisecond)
if _, ok := s1.heartbeatTimers["foo"]; ok {
t.Fatalf("timer should be gone")
@@ -99,7 +99,7 @@ func TestResetHeartbeatTimerLocked_Renew(t *testing.T) {
renew := time.Now()
// Watch for invalidation
for time.Now().Sub(renew) < 20*time.Millisecond {
for time.Now().Sub(renew) < time.Duration(testutil.TestMultiplier()*20)*time.Millisecond {
s1.heartbeatTimersLock.Lock()
_, ok := s1.heartbeatTimers["foo"]
s1.heartbeatTimersLock.Unlock()


@@ -149,9 +149,10 @@ func jobIsGCable(obj interface{}) (bool, error) {
return false, fmt.Errorf("Unexpected type: %v", obj)
}
// The job is GCable if it is batch and it is not periodic
// The job is GCable if it is batch, it is not periodic and is not a
// parameterized job.
periodic := j.Periodic != nil && j.Periodic.Enabled
gcable := j.Type == structs.JobTypeBatch && !periodic
gcable := j.Type == structs.JobTypeBatch && !periodic && !j.IsParameterized()
return gcable, nil
}


@@ -1,7 +1,6 @@
package config
import (
"crypto/tls"
"fmt"
"net/http"
"strings"
@@ -121,7 +120,7 @@ func (a *ConsulConfig) Merge(b *ConsulConfig) *ConsulConfig {
result.EnableSSL = b.EnableSSL
}
if b.VerifySSL != nil {
result.VerifySSL = b.EnableSSL
result.VerifySSL = b.VerifySSL
}
if b.CAFile != "" {
result.CAFile = b.CAFile
@@ -180,6 +179,7 @@ func (c *ConsulConfig) ApiConfig() (*consul.Config, error) {
if c.VerifySSL != nil {
tlsConfig.InsecureSkipVerify = !*c.VerifySSL
}
tlsClientCfg, err := consul.SetupTLSConfig(&tlsConfig)
if err != nil {
return nil, fmt.Errorf("error creating tls client config for consul: %v", err)
@@ -188,13 +188,6 @@ func (c *ConsulConfig) ApiConfig() (*consul.Config, error) {
TLSClientConfig: tlsClientCfg,
}
}
if c.EnableSSL != nil && !*c.VerifySSL {
config.HttpClient.Transport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
}
return config, nil
}


@@ -375,8 +375,8 @@ func (t *Task) Diff(other *Task, contextual bool) (*TaskDiff, error) {
diff.Objects = append(diff.Objects, lDiff)
}
// Dispatch Input diff
dDiff := primitiveObjectDiff(t.DispatchInput, other.DispatchInput, nil, "DispatchInput", contextual)
// Dispatch payload diff
dDiff := primitiveObjectDiff(t.DispatchPayload, other.DispatchPayload, nil, "DispatchPayload", contextual)
if dDiff != nil {
diff.Objects = append(diff.Objects, dDiff)
}
@@ -667,11 +667,11 @@ func parameterizedJobDiff(old, new *ParameterizedJobConfig, contextual bool) *Ob
diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual)
// Meta diffs
if optionalDiff := stringSetDiff(old.MetaOptional, new.MetaOptional, "OptionalMeta", contextual); optionalDiff != nil {
if optionalDiff := stringSetDiff(old.MetaOptional, new.MetaOptional, "MetaOptional", contextual); optionalDiff != nil {
diff.Objects = append(diff.Objects, optionalDiff)
}
if requiredDiff := stringSetDiff(old.MetaRequired, new.MetaRequired, "RequiredMeta", contextual); requiredDiff != nil {
if requiredDiff := stringSetDiff(old.MetaRequired, new.MetaRequired, "MetaRequired", contextual); requiredDiff != nil {
diff.Objects = append(diff.Objects, requiredDiff)
}


@@ -904,11 +904,11 @@ func TestJobDiff(t *testing.T) {
Objects: []*ObjectDiff{
{
Type: DiffTypeAdded,
Name: "OptionalMeta",
Name: "MetaOptional",
Fields: []*FieldDiff{
{
Type: DiffTypeAdded,
Name: "OptionalMeta",
Name: "MetaOptional",
Old: "",
New: "foo",
},
@@ -916,11 +916,11 @@ func TestJobDiff(t *testing.T) {
},
{
Type: DiffTypeAdded,
Name: "RequiredMeta",
Name: "MetaRequired",
Fields: []*FieldDiff{
{
Type: DiffTypeAdded,
Name: "RequiredMeta",
Name: "MetaRequired",
Old: "",
New: "bar",
},
@@ -958,11 +958,11 @@ func TestJobDiff(t *testing.T) {
Objects: []*ObjectDiff{
{
Type: DiffTypeDeleted,
Name: "OptionalMeta",
Name: "MetaOptional",
Fields: []*FieldDiff{
{
Type: DiffTypeDeleted,
Name: "OptionalMeta",
Name: "MetaOptional",
Old: "foo",
New: "",
},
@@ -970,11 +970,11 @@ func TestJobDiff(t *testing.T) {
},
{
Type: DiffTypeDeleted,
Name: "RequiredMeta",
Name: "MetaRequired",
Fields: []*FieldDiff{
{
Type: DiffTypeDeleted,
Name: "RequiredMeta",
Name: "MetaRequired",
Old: "bar",
New: "",
},
@@ -1018,17 +1018,17 @@ func TestJobDiff(t *testing.T) {
Objects: []*ObjectDiff{
{
Type: DiffTypeEdited,
Name: "OptionalMeta",
Name: "MetaOptional",
Fields: []*FieldDiff{
{
Type: DiffTypeAdded,
Name: "OptionalMeta",
Name: "MetaOptional",
Old: "",
New: "bam",
},
{
Type: DiffTypeDeleted,
Name: "OptionalMeta",
Name: "MetaOptional",
Old: "foo",
New: "",
},
@@ -1036,17 +1036,17 @@ func TestJobDiff(t *testing.T) {
},
{
Type: DiffTypeEdited,
Name: "RequiredMeta",
Name: "MetaRequired",
Fields: []*FieldDiff{
{
Type: DiffTypeAdded,
Name: "RequiredMeta",
Name: "MetaRequired",
Old: "",
New: "bang",
},
{
Type: DiffTypeDeleted,
Name: "RequiredMeta",
Name: "MetaRequired",
Old: "bar",
New: "",
},
@@ -1091,11 +1091,11 @@ func TestJobDiff(t *testing.T) {
Objects: []*ObjectDiff{
{
Type: DiffTypeNone,
Name: "OptionalMeta",
Name: "MetaOptional",
Fields: []*FieldDiff{
{
Type: DiffTypeNone,
Name: "OptionalMeta",
Name: "MetaOptional",
Old: "foo",
New: "foo",
},
@@ -1103,11 +1103,11 @@ func TestJobDiff(t *testing.T) {
},
{
Type: DiffTypeNone,
Name: "RequiredMeta",
Name: "MetaRequired",
Fields: []*FieldDiff{
{
Type: DiffTypeNone,
Name: "RequiredMeta",
Name: "MetaRequired",
Old: "bar",
New: "bar",
},
@@ -3666,10 +3666,10 @@ func TestTaskDiff(t *testing.T) {
},
},
{
// DispatchInput added
// DispatchPayload added
Old: &Task{},
New: &Task{
DispatchInput: &DispatchInputConfig{
DispatchPayload: &DispatchPayloadConfig{
File: "foo",
},
},
@@ -3678,7 +3678,7 @@ func TestTaskDiff(t *testing.T) {
Objects: []*ObjectDiff{
{
Type: DiffTypeAdded,
Name: "DispatchInput",
Name: "DispatchPayload",
Fields: []*FieldDiff{
{
Type: DiffTypeAdded,
@@ -3692,9 +3692,9 @@ func TestTaskDiff(t *testing.T) {
},
},
{
// DispatchInput deleted
// DispatchPayload deleted
Old: &Task{
DispatchInput: &DispatchInputConfig{
DispatchPayload: &DispatchPayloadConfig{
File: "foo",
},
},
@@ -3704,7 +3704,7 @@ func TestTaskDiff(t *testing.T) {
Objects: []*ObjectDiff{
{
Type: DiffTypeDeleted,
Name: "DispatchInput",
Name: "DispatchPayload",
Fields: []*FieldDiff{
{
Type: DiffTypeDeleted,
@@ -3718,14 +3718,14 @@ func TestTaskDiff(t *testing.T) {
},
},
{
// Dispatch input edited
// Dispatch payload edited
Old: &Task{
DispatchInput: &DispatchInputConfig{
DispatchPayload: &DispatchPayloadConfig{
File: "foo",
},
},
New: &Task{
DispatchInput: &DispatchInputConfig{
DispatchPayload: &DispatchPayloadConfig{
File: "bar",
},
},
@@ -3734,7 +3734,7 @@ func TestTaskDiff(t *testing.T) {
Objects: []*ObjectDiff{
{
Type: DiffTypeEdited,
Name: "DispatchInput",
Name: "DispatchPayload",
Fields: []*FieldDiff{
{
Type: DiffTypeEdited,
@@ -3748,16 +3748,16 @@ func TestTaskDiff(t *testing.T) {
},
},
{
// DispatchInput edited with context. Place holder for if more
// DispatchPayload edited with context. Place holder for if more
// fields are added
Contextual: true,
Old: &Task{
DispatchInput: &DispatchInputConfig{
DispatchPayload: &DispatchPayloadConfig{
File: "foo",
},
},
New: &Task{
DispatchInput: &DispatchInputConfig{
DispatchPayload: &DispatchPayloadConfig{
File: "bar",
},
},
@@ -3766,7 +3766,7 @@ func TestTaskDiff(t *testing.T) {
Objects: []*ObjectDiff{
{
Type: DiffTypeEdited,
Name: "DispatchInput",
Name: "DispatchPayload",
Fields: []*FieldDiff{
{
Type: DiffTypeEdited,


@@ -1647,10 +1647,10 @@ type ParameterizedJobConfig struct {
Payload string
// MetaRequired is metadata keys that must be specified by the dispatcher
MetaRequired []string `mapstructure:"required"`
MetaRequired []string `mapstructure:"meta_required"`
// MetaOptional is metadata keys that may be specified by the dispatcher
MetaOptional []string `mapstructure:"optional"`
MetaOptional []string `mapstructure:"meta_optional"`
}
func (d *ParameterizedJobConfig) Validate() error {
@@ -1694,22 +1694,22 @@ func DispatchedID(templateID string, t time.Time) string {
return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), u)
}
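For reference (not part of the diff), the ID built above has the shape seen in the docs examples later in this commit, assuming `u` is a short random suffix as in the surrounding code:

```
// DispatchedID("video-encode", t) returns something like
//   "video-encode/dispatch-1485379325-cb38d00d"
// i.e. templateID + DispatchLaunchSuffix + t.Unix() + "-" + u.
id := DispatchedID("video-encode", time.Now())
```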
// DispatchInputConfig configures how a task gets its input from a job dispatch
type DispatchInputConfig struct {
// DispatchPayloadConfig configures how a task gets its input from a job dispatch
type DispatchPayloadConfig struct {
// File specifies a relative path to where the input data should be written
File string
}
func (d *DispatchInputConfig) Copy() *DispatchInputConfig {
func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig {
if d == nil {
return nil
}
nd := new(DispatchInputConfig)
nd := new(DispatchPayloadConfig)
*nd = *d
return nd
}
func (d *DispatchInputConfig) Validate() error {
func (d *DispatchPayloadConfig) Validate() error {
// Verify the destination doesn't escape
escaped, err := PathEscapesAllocDir("task/local/", d.File)
if err != nil {
@@ -2272,8 +2272,8 @@ type Task struct {
// Resources is the resources needed by this task
Resources *Resources
// DispatchInput configures how the task retrieves its input from a dispatch
DispatchInput *DispatchInputConfig
// DispatchPayload configures how the task retrieves its input from a dispatch
DispatchPayload *DispatchPayloadConfig
// Meta is used to associate arbitrary metadata with this
// task. This is opaque to Nomad.
@@ -2312,7 +2312,7 @@ func (t *Task) Copy() *Task {
nt.Vault = nt.Vault.Copy()
nt.Resources = nt.Resources.Copy()
nt.Meta = helper.CopyMapStringString(nt.Meta)
nt.DispatchInput = nt.DispatchInput.Copy()
nt.DispatchPayload = nt.DispatchPayload.Copy()
if t.Artifacts != nil {
artifacts := make([]*TaskArtifact, 0, len(t.Artifacts))
@@ -2477,10 +2477,10 @@ func (t *Task) Validate(ephemeralDisk *EphemeralDisk) error {
}
}
// Validate the dispatch input block if there
if t.DispatchInput != nil {
if err := t.DispatchInput.Validate(); err != nil {
mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Input validation failed: %v", err))
// Validate the dispatch payload block if there
if t.DispatchPayload != nil {
if err := t.DispatchPayload.Validate(); err != nil {
mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err))
}
}


@@ -1518,8 +1518,8 @@ func TestParameterizedJobConfig_Canonicalize(t *testing.T) {
}
}
func TestDispatchInputConfig_Validate(t *testing.T) {
d := &DispatchInputConfig{
func TestDispatchPayloadConfig_Validate(t *testing.T) {
d := &DispatchPayloadConfig{
File: "foo",
}


@@ -591,9 +591,6 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) {
unblock := make(chan struct{})
for i := 0; i < numRequests; i++ {
go func() {
// Ensure all the goroutines are made
time.Sleep(10 * time.Millisecond)
// Lookup ourselves
_, err := client.LookupToken(ctx, v.Config.Token)
if err != nil {
@@ -607,7 +604,7 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) {
// Cancel the context
cancel()
time.AfterFunc(1*time.Second, func() { close(unblock) })
close(unblock)
}()
}
@@ -618,9 +615,15 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) {
}
desired := numRequests - 1
if cancels != desired {
t.Fatalf("Incorrect number of cancels; got %d; want %d", cancels, desired)
}
testutil.WaitForResult(func() (bool, error) {
if cancels != desired {
return false, fmt.Errorf("Incorrect number of cancels; got %d; want %d", cancels, desired)
}
return true, nil
}, func(err error) {
t.Fatalf("Connection not established")
})
}
func TestVaultClient_CreateToken_Root(t *testing.T) {
@@ -764,6 +767,16 @@ func TestVaultClient_CreateToken_Root_Target_Role(t *testing.T) {
}
func TestVaultClient_CreateToken_Blacklist_Role(t *testing.T) {
// Need to skip if Vault is v0.6.4
version, err := testutil.VaultVersion()
if err != nil {
t.Fatalf("failed to determine version: %v", err)
}
if strings.Contains(version, "v0.6.4") {
t.Skipf("Vault has a regression in v0.6.4 that this test hits")
}
v := testutil.NewTestVault(t).Start()
defer v.Stop()

scripts/build-dev.sh (new executable file)

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
set -e
GIT_COMMIT="$(git rev-parse HEAD)"
GIT_DIRTY="$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)"
LDFLAG="main.GitCommit=${GIT_COMMIT}${GIT_DIRTY}"
TAGS="nomad_test"
if [[ $(uname) == "Linux" ]]; then
if pkg-config --exists lxc; then
TAGS="$TAGS lxc"
fi
fi
echo "--> Installing with tags: $TAGS"
go install -ldflags "-X $LDFLAG" -tags "${TAGS}"


@@ -14,14 +14,7 @@ cd "$DIR"
# Get the git commit
GIT_COMMIT="$(git rev-parse HEAD)"
GIT_DIRTY="$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)"
# Determine the arch/os combos we're building for
# XC_ARCH=${XC_ARCH:-"386 amd64"}
# XC_OS=${XC_OS:-linux}
XC_ARCH=${XC_ARCH:-"amd64 386"}
XC_OS=${XC_OS:-"darwin windows"}
XC_EXCLUDE=${XC_EXCLUDE:-"!darwin/arm !darwin/386"}
LDFLAG="main.GitCommit=${GIT_COMMIT}${GIT_DIRTY}"
# Delete the old dir
echo "==> Removing old directory..."
@@ -29,25 +22,35 @@ rm -f bin/*
rm -rf pkg/*
mkdir -p bin/
# If its dev mode, only build for ourself
if [[ "${NOMAD_DEV}" ]]; then
XC_OS=$(go env GOOS)
XC_ARCH=$(go env GOARCH)
if [[ $(uname) == "Linux" ]]; then
echo "==> Building linux 386..."
CGO_ENABLED=1 GOARCH="386" GOOS="linux" go build -ldflags "-X $LDFLAG" -o "pkg/linux_386/nomad"
echo "==> Building linux amd64..."
CGO_ENABLED=1 GOARCH="amd64" GOOS="linux" go build -ldflags "-X $LDFLAG" -o "pkg/linux_amd64/nomad"
echo "==> Building linux amd64 with lxc..."
CGO_ENABLED=1 GOARCH="amd64" GOOS="linux" go build -ldflags "-X $LDFLAG" -o "pkg/linux_amd64-lxc/nomad" -tags "lxc"
echo "==> Building linux arm..."
CC="arm-linux-gnueabi-gcc-5" GOOS=linux GOARCH="arm" CGO_ENABLED=1 go build -ldflags "-X $LDFLAG" -o "pkg/linux_arm/nomad"
echo "==> Building linux arm64..."
CC="aarch64-linux-gnu-gcc-5" GOOS=linux GOARCH="arm64" CGO_ENABLED=1 go build -ldflags "-X $LDFLAG" -o "pkg/linux_arm64/nomad"
echo "==> Building windows 386..."
CGO_ENABLED=1 GOARCH="386" GOOS="windows" go build -ldflags "-X $LDFLAG" -o "pkg/windows_386/nomad"
echo "==> Building windows amd64..."
CGO_ENABLED=1 GOARCH="amd64" GOOS="windows" go build -ldflags "-X $LDFLAG" -o "pkg/windows_amd64/nomad"
elif [[ $(uname) == "Darwin" ]]; then
echo "==> Building darwin amd64..."
CGO_ENABLED=1 GOARCH="amd64" GOOS="darwin" go build -ldflags "-X $LDFLAG" -o "pkg/darwin_amd64/nomad"
else
echo "Unable to build on $(uname). Use Linux or Darwin."
exit 1
fi
# Build!
echo "==> Building..."
gox \
-os="${XC_OS}" \
-arch="${XC_ARCH}" \
-osarch="${XC_EXCLUDE}" \
-cgo \
-ldflags "-X main.GitCommit='${GIT_COMMIT}${GIT_DIRTY}'" \
-output "pkg/{{.OS}}_{{.Arch}}/nomad" \
.
echo ""
# Move all the compiled things to the $GOPATH/bin
GOPATH=${GOPATH:-$(go env GOPATH)}
case $(uname) in
@@ -66,20 +69,19 @@ for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f); do
cp ${F} ${MAIN_GOPATH}/bin/
done
if [[ "x${NOMAD_DEV}" == "x" ]]; then
# Zip and copy to the dist dir
echo "==> Packaging..."
for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do
OSARCH=$(basename ${PLATFORM})
echo "--> ${OSARCH}"
# Zip and copy to the dist dir
echo "==> Packaging..."
for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do
OSARCH=$(basename ${PLATFORM})
echo "--> ${OSARCH}"
pushd $PLATFORM >/dev/null 2>&1
zip ../${OSARCH}.zip ./*
popd >/dev/null 2>&1
done
fi
pushd $PLATFORM >/dev/null 2>&1
zip ../${OSARCH}.zip ./*
popd >/dev/null 2>&1
done
# Done!
echo
echo "==> Results:"
ls -hl bin/
tree pkg/


@@ -1,13 +1,18 @@
#!/bin/bash
set -ex
set -e
CONSUL_VERSION="0.7.2"
CONSUL_VERSION="0.7.3"
CURDIR=`pwd`
if [[ $(which consul >/dev/null && consul version | head -n 1 | cut -d ' ' -f 2) == "v$CONSUL_VERSION" ]]; then
echo "Consul v$CONSUL_VERSION already installed; Skipping"
exit
fi
echo Fetching Consul...
cd /tmp/
wget https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip -O consul.zip
wget -q https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip -O consul.zip
echo Installing Consul...
unzip consul.zip
sudo chmod +x consul


@@ -1,9 +1,8 @@
#!/bin/bash
set -ex
set -e
RKT_VERSION="v1.17.0"
RKT_SHA512="30fd15716e148afa34ed28e6d5d778226e5e9761e9df3eb98f397cb2a7f3e3fc78e3dad2b717eee4157afc58183778cb1872aa82f3d05cc2bc9fb41193e81a7f"
RKT_VERSION="v1.18.0"
CMD="cp"
if [ ! -v DEST_DIR ]; then
@@ -11,18 +10,16 @@ if [ ! -v DEST_DIR ]; then
CMD="sudo cp"
fi
if [ ! -d "rkt-${RKT_VERSION}" ]; then
printf "rkt-%s/ doesn't exist\n" "${RKT_VERSION}"
if [ ! -f "rkt-${RKT_VERSION}.tar.gz" ]; then
printf "Fetching rkt-%s.tar.gz\n" "${RKT_VERSION}"
echo "$RKT_SHA512 rkt-${RKT_VERSION}.tar.gz" > rkt-$RKT_VERSION.tar.gz.sha512sum
wget https://github.com/coreos/rkt/releases/download/$RKT_VERSION/rkt-$RKT_VERSION.tar.gz
sha512sum --check rkt-$RKT_VERSION.tar.gz.sha512sum
tar xzvf rkt-$RKT_VERSION.tar.gz
fi
if [[ $(which rkt >/dev/null && rkt version | head -n 1) == "rkt Version: 1.18.0" ]]; then
echo "rkt installed; Skipping"
else
printf "Fetching rkt-%s.tar.gz\n" "${RKT_VERSION}"
cd /tmp
wget -q https://github.com/coreos/rkt/releases/download/$RKT_VERSION/rkt-$RKT_VERSION.tar.gz -O rkt.tar.gz
tar xzf rkt.tar.gz
$CMD rkt-$RKT_VERSION/rkt $DEST_DIR
$CMD rkt-$RKT_VERSION/*.aci $DEST_DIR
fi
$CMD rkt-$RKT_VERSION/rkt $DEST_DIR
$CMD rkt-$RKT_VERSION/*.aci $DEST_DIR
rkt version


@@ -1,8 +1,11 @@
#!/bin/bash
set -ex
set -e
# Configure rkt networking
sudo mkdir -p /etc/rkt/net.d
echo '{"name": "default", "type": "ptp", "ipMasq": false, "ipam": { "type": "host-local", "subnet": "172.16.28.0/24", "routes": [ { "dst": "0.0.0.0/0" } ] } }' | sudo tee -a /etc/rkt/net.d/99-network.conf
if [[ -f /etc/rkt/net.d/99-network.conf ]]; then
echo "rkt network already configured; Skipping"
exit
fi
echo '{"name": "default", "type": "ptp", "ipMasq": false, "ipam": { "type": "host-local", "subnet": "172.16.28.0/24", "routes": [ { "dst": "0.0.0.0/0" } ] } }' | jq . | sudo tee -a /etc/rkt/net.d/99-network.conf


@@ -1,13 +1,18 @@
#!/bin/bash
set -ex
set -e
VAULT_VERSION="0.6.2"
VAULT_VERSION="0.6.4"
CURDIR=`pwd`
if [[ $(which vault >/dev/null && vault version | cut -d ' ' -f 2) == "v$VAULT_VERSION" ]]; then
echo "Vault v$VAULT_VERSION already installed; Skipping"
exit
fi
echo Fetching Vault ${VAULT_VERSION}...
cd /tmp/
wget https://releases.hashicorp.com/vault/${VAULT_VERSION}/vault_${VAULT_VERSION}_linux_amd64.zip -O vault.zip
wget -q https://releases.hashicorp.com/vault/${VAULT_VERSION}/vault_${VAULT_VERSION}_linux_amd64.zip -O vault.zip
echo Installing Vault...
unzip vault.zip
sudo chmod +x vault


@@ -1,7 +1,12 @@
#!/usr/bin/env bash
set -e
GOTEST_TAGS="nomad_test lxc"
GOTEST_TAGS="nomad_test"
if [[ $(uname) == "Linux" ]]; then
if pkg-config --exists lxc; then
GOTEST_TAGS="$GOTEST_TAGS lxc"
fi
fi
# Create a temp dir and clean it up on exit
TEMPDIR=`mktemp -d -t nomad-test.XXX`
@@ -9,7 +14,8 @@ trap "rm -rf $TEMPDIR" EXIT HUP INT QUIT TERM
# Build the Nomad binary for the API tests
echo "--> Building nomad"
go build -tags "$GOTEST_TAGS" -o $TEMPDIR/nomad || exit 1
echo go build -i -tags \"$GOTEST_TAGS\" -o $TEMPDIR/nomad
go build -i -tags "$GOTEST_TAGS" -o $TEMPDIR/nomad || exit 1
# Run the tests
echo "--> Running tests"


@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -e
export PING_SLEEP=30
export PING_SLEEP=60
bash -c "while true; do echo \$(date) - building ...; sleep $PING_SLEEP; done" &
PING_LOOP_PID=$!


@@ -123,3 +123,11 @@ func getPort() uint64 {
vaultPortOffset += 1
return p
}
// VaultVersion returns the Vault version as a string or an error if it couldn't
// be determined
func VaultVersion() (string, error) {
cmd := exec.Command("vault", "version")
out, err := cmd.Output()
return string(out), err
}


@@ -5,9 +5,9 @@ var GitCommit string
var GitDescribe string
// The main version number that is being run at the moment.
const Version = "0.5.3"
const Version = "0.5.4"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
// such as "dev" (in development), "beta", "rc1", etc.
const VersionPrerelease = "rc1"
const VersionPrerelease = "dev"


@@ -1,7 +1,7 @@
{
"allowed_policies": "nomad-server",
"disallowed_policies": "nomad-server",
"explicit_max_ttl": 0,
"name": "nomad-server",
"name": "nomad-cluster",
"orphan": false,
"period": 259200,
"renewable": true


@@ -1,20 +1,35 @@
# Allow creating tokens under the role
path "auth/token/create/nomad-server" {
# Allow creating tokens under "nomad-cluster" role. The role name should be
# updated if "nomad-cluster" is not used.
path "auth/token/create/nomad-cluster" {
capabilities = ["update"]
}
# Allow looking up the role
path "auth/token/roles/nomad-server" {
# Allow looking up "nomad-cluster" role. The role name should be updated if
# "nomad-cluster" is not used.
path "auth/token/roles/nomad-cluster" {
capabilities = ["read"]
}
# Allow looking up incoming tokens to validate they have permissions to
# access the tokens they are requesting
path "auth/token/lookup/*" {
capabilities = ["read"]
}
# Allow revoking tokens that should no longer exist
path "/auth/token/revoke-accessor/*" {
# Allow looking up incoming tokens to validate they have permissions to access
# the tokens they are requesting. This is only required if
# `allow_unauthenticated` is set to false.
path "auth/token/lookup" {
capabilities = ["update"]
}
# Allow revoking tokens that should no longer exist. This allows revoking
# tokens for dead tasks.
path "auth/token/revoke-accessor" {
capabilities = ["update"]
}
# Allow checking the capabilities of our own token. This is used to validate the
# token upon startup.
path "/sys/capabilities-self" {
capabilities = ["update"]
}
# Allow our own token to be renewed.
path "auth/token/renew-self" {
capabilities = ["update"]
}
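A sketch of loading the role and policy above with the Vault CLI of this era (the file names are hypothetical):

```
$ vault policy-write nomad-server nomad-server-policy.hcl
$ vault write auth/token/roles/nomad-cluster @nomad-cluster-role.json
$ vault token-create -policy=nomad-server -period=72h
```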


@@ -51,7 +51,9 @@ vault {
The token given to Nomad does not have to be created from this role but must
have "update" capability on "auth/token/create/<create_from_role>" path in
Vault. If this value is unset and the token is created from a role, the value
is defaulted to the role the token is from.
is defaulted to the role the token is from. This is largely for backwards
compatibility. It is recommended to set the `create_from_role` field if Nomad
is deriving child tokens from a role.
- `task_token_ttl` `(string: "")` - Specifies the TTL of created tokens when
using a root token. This is specified using a label suffix like "30s" or "1h".
@@ -117,6 +119,11 @@ vault {
# should set the VAULT_TOKEN environment variable when starting the Nomad
# agent
token = "debecfdc-9ed7-ea22-c6ee-948f22cdd474"
# Setting the create_from_role option causes Nomad to create tokens for tasks
# via the provided role. This allows the role to manage what policies are
# allowed and disallowed for use by tasks.
create_from_role = "nomad-server"
}
```

View File

@@ -54,3 +54,18 @@ are not participating in Raft. Thus clients can have 100+ millisecond latency to
their servers. This allows having a set of Nomad servers that service clients
that can be spread geographically over a continent or even the world in the case
of having a single "global" region and many datacenters.
## Ports Used
Nomad requires 3 different ports to work properly on servers and 2 on clients;
each port uses TCP, UDP, or both protocols. Below we document the requirements
for each port.
* HTTP API (Default 4646). This is used by clients and servers to serve the HTTP
API. TCP only.
* RPC (Default 4647). This is used by servers and clients to communicate amongst
each other. TCP only.
* Serf WAN (Default 4648). This is used by servers to gossip over the WAN to
other servers. TCP and UDP.
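If these defaults conflict with other services, they can be changed in the agent configuration; a minimal sketch using the standard `ports` block:

```
ports {
  http = 4646
  rpc  = 4647
  serf = 4648
}
```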


@@ -65,6 +65,7 @@ via CLI arguments. The `agent` command accepts the following arguments:
* `-vault-token=<token>`: The Vault token used to derive tokens. Only needs to
be set on Servers. Overrides the Vault token read from the VAULT_TOKEN
environment variable.
* `-vault-create-from-role=<role>`: The role name to create tokens for tasks from.
* `-vault-ca-file=<path>`: Path to a PEM-encoded CA cert file used to verify the
Vault server SSL certificate.
* `-vault-ca-path=<path>`: Path to a directory of PEM-encoded CA cert files used


@@ -0,0 +1,114 @@
---
layout: "docs"
page_title: "Commands: job dispatch"
sidebar_current: "docs-commands-job-dispatch"
description: >
The dispatch command is used to create an instance of a parameterized job.
---
# Command: job dispatch
~> The `job dispatch` subcommand described here is available only in version
0.5.3 and later. The release candidate is downloadable on the [releases
page](https://releases.hashicorp.com/nomad/0.5.3-rc1/).
The `job dispatch` command is used to create new instances of a [parameterized
job]. The parameterized job captures a job's configuration and runtime
requirements in a generic way and `dispatch` is used to provide the input for
the job to run against. A parameterized job is similar to a function definition,
and dispatch is used to invoke the function.
Each time a job is dispatched, a unique job ID is generated. This allows a
caller to track the status of the job, much like a future or promise in some
programming languages.
## Usage
```
nomad job dispatch [options] <parameterized job> [input source]
```
Dispatch creates an instance of a parameterized job. A data payload for the
dispatched instance can be provided via stdin by using "-" as the input source
or by specifying a path to a file. Metadata can be supplied by using the meta
flag one or more times.
The payload has a **size limit of 16 KiB**.
Upon successful creation, the dispatched job ID will be printed and the
triggered evaluation will be monitored. This can be disabled by supplying the
detach flag.
On successful job submission and scheduling, exit code 0 will be returned. If
there are job placement issues (unsatisfiable constraints, resource
exhaustion, etc.), the exit code will be 2. Any other errors, including client
connection issues or internal errors, are indicated by exit code 1.
## General Options
<%= partial "docs/commands/_general_options" %>
## Run Options
* `-meta`: Meta takes a key/value pair separated by "=". The metadata key will
be merged into the job's metadata. The job may define a default value for the
key which is overridden when dispatching. The flag can be provided more than
once to inject multiple metadata key/value pairs (see the example following
this list). Arbitrary keys are not allowed; the parameterized job must allow
the key to be merged.
* `-detach`: Return immediately instead of monitoring. A new evaluation ID
will be output, which can be used to examine the evaluation using the
[eval-status](/docs/commands/eval-status.html) command.
* `-verbose`: Show full information.
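As an example of supplying metadata, the following sketch dispatches the
"video-encode" job from the examples below with two metadata keys. The keys
`quality` and `priority` are illustrative and must be allowed by the
parameterized job:

```
# Sketch: inject metadata at dispatch time (keys are illustrative)
$ nomad job dispatch -meta quality=720p -meta priority=high video-encode video-config.json
```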
## Examples
Dispatch against a parameterized job with the ID "video-encode", passing in a
configuration payload via stdin:
```
$ cat << EOF | nomad job dispatch video-encode -
{
"s3-input": "https://s3-us-west-1.amazonaws.com/video-bucket/cb31dabb1",
"s3-output": "https://s3-us-west-1.amazonaws.com/video-bucket/a149adbe3",
"input-codec": "mp4",
"output-codec": "webm",
"quality": "1080p"
}
EOF
Dispatched Job ID = video-encode/dispatch-1485379325-cb38d00d
Evaluation ID = 31199841
==> Monitoring evaluation "31199841"
Evaluation triggered by job "video-encode/dispatch-1485379325-cb38d00d"
Allocation "8254b85f" created: node "82ff9c50", group "cache"
Evaluation status changed: "pending" -> "complete"
==> Evaluation "31199841" finished with status "complete"
```
Dispatch against a parameterized job with the ID "video-encode", passing in a
configuration payload via a file:
```
$ nomad job dispatch video-encode video-config.json
Dispatched Job ID = video-encode/dispatch-1485379325-cb38d00d
Evaluation ID = 31199841
==> Monitoring evaluation "31199841"
Evaluation triggered by job "video-encode/dispatch-1485379325-cb38d00d"
Allocation "8254b85f" created: node "82ff9c50", group "cache"
Evaluation status changed: "pending" -> "complete"
==> Evaluation "31199841" finished with status "complete"
```
Dispatch against a parameterized job with the ID "video-encode" using the detach
flag:
```
$ nomad job dispatch -detach video-encode video-config.json
Dispatched Job ID = video-encode/dispatch-1485380684-c37b3dba
Evaluation ID = d9034c4e
```
[parameterized job]: /docs/job-specification/parameterized.html "Nomad parameterized Job Specification"

View File

@@ -54,26 +54,28 @@ Short view of a specific job:
```
$ nomad status -short job1
ID = job1
Name = Test Job
Type = service
Priority = 3
Datacenters = dc1,dc2,dc3
Status = pending
Periodic = false
ID = job1
Name = Test Job
Type = service
Priority = 3
Datacenters = dc1,dc2,dc3
Status = pending
Periodic = false
Parameterized = false
```
Full status information of a job:
```
$ nomad status example
ID = example
Name = example
Type = service
Priority = 50
Datacenters = dc1
Status = running
Periodic = false
ID = example
Name = example
Type = service
Priority = 50
Datacenters = dc1
Status = running
Periodic = false
Parameterized = false
Summary
Task Group Queued Starting Running Failed Complete Lost
@@ -84,17 +86,71 @@ ID Eval ID Node ID Task Group Desired Status Created At
24cfd201 81efc2fa 8d0331e9 cache run running 08/08/16 21:03:19 CDT
```
Full status information of a periodic job:
```
ID = example
Name = example
Type = batch
Priority = 50
Datacenters = dc1
Status = running
Periodic = true
Parameterized = false
Next Periodic Launch = 01/26/17 06:19:46 UTC (1s from now)
Children Job Summary
Pending Running Dead
0 5 0
Previously Launched Jobs
ID Status
example/periodic-1485411581 running
example/periodic-1485411582 running
example/periodic-1485411583 running
example/periodic-1485411584 running
example/periodic-1485411585 running
```
Full status information of a parameterized job:
```
ID = example
Name = example
Type = batch
Priority = 50
Datacenters = dc1
Status = running
Periodic = false
Parameterized = true
Parameterized Job
Payload = required
Required Metadata = foo
Optional Metadata = bar
Parameterized Job Summary
Pending Running Dead
0 2 0
Dispatched Jobs
ID Status
example/dispatch-1485411496-58f24d2d running
example/dispatch-1485411499-fa2ee40e running
```
Full status information of a job with placement failures:
```
$ nomad status example
ID = example
Name = example
Type = service
Priority = 50
Datacenters = dc1
Status = running
Periodic = false
ID = example
Name = example
Type = service
Priority = 50
Datacenters = dc1
Status = running
Periodic = false
Parameterized = false
Summary
Task Group Queued Starting Running Failed Complete Lost
@@ -120,13 +176,14 @@ become availables so that it can place the remaining allocations.
```
$ nomad status -evals example
ID = example
Name = example
Type = service
Priority = 50
Datacenters = dc1
Status = running
Periodic = false
ID = example
Name = example
Type = service
Priority = 50
Datacenters = dc1
Status = running
Periodic = false
Parameterized = false
Summary
Task Group Queued Starting Running Failed Complete Lost

View File

@@ -137,7 +137,6 @@ region is used; another region can be specified using the `?region=` query param
"ModifyIndex": 14
}
```
</dd>
</dl>
@@ -275,6 +274,11 @@ region is used; another region can be specified using the `?region=` query param
```javascript
{
"JobID": "example",
"Children": {
"Dead": 0,
"Running": 7,
"Pending": 2
},
"Summary": {
"cache": {
"Queued": 0,
@@ -333,6 +337,52 @@ region is used; another region can be specified using the `?region=` query param
</dd>
</dl>
<dl>
<dt>Description</dt>
<dd>
Dispatch a new instance of a parameterized job.
</dd>
<dt>Method</dt>
<dd>PUT or POST</dd>
<dt>URL</dt>
<dd>`/v1/job/<ID>/dispatch`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">Payload</span>
<span class="param-flags">optional</span>
A `[]byte` array encoded as a base64 string with a maximum size of 16 KiB.
</li>
<li>
<span class="param">Meta</span>
<span class="param-flags">optional</span>
A `map[string]string` of metadata keys to their values.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"KnownLeader": false,
"LastContact": 0,
"Index": 13,
"JobCreateIndex": 12,
"EvalCreateIndex": 13,
"EvalID": "e5f55fac-bc69-119d-528a-1fc7ade5e02c",
"DispatchedJobID": "example/dispatch-1485408778-81644024"
}
```
</dd>
</dl>
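As a sketch of calling this endpoint, the following assumes an agent listening
on the default address and reuses the "example" job; `curl`, `base64`, and the
payload file are illustrative and not part of the endpoint definition:

```
# Sketch: dispatch a parameterized job via the HTTP API
# (agent address and file names are illustrative)
$ PAYLOAD=$(base64 < video-config.json | tr -d '\n')
$ curl -X PUT -d "{\"Payload\": \"$PAYLOAD\", \"Meta\": {\"foo\": \"bar\"}}" \
    http://127.0.0.1:4646/v1/job/example/dispatch
```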
<dl>
<dt>Description</dt>
<dd>

View File

@@ -137,7 +137,10 @@ Below is an example of a JSON object that submits a `periodic` job to Nomad:
},
"RelativeDest":"local/"
}
]
],
"DispatchPayload": {
"File": "config.json"
}
}
],
"RestartPolicy":{
@@ -165,7 +168,17 @@ Below is an example of a JSON object that submits a `periodic` job to Nomad:
"Meta":{
"foo":"bar",
"baz":"pipe"
}
},
"ParameterizedJob": {
"Payload": "required",
"MetaRequired": [
"foo"
],
"MetaOptional": [
"bar"
]
},
"Payload": null
}
}
```
@@ -194,6 +207,25 @@ The `Job` object supports the following keys:
* `Meta` - Annotates the job with opaque metadata.
* `ParameterizedJob` - Specifies the job as a parameterized job such that it can
be dispatched against. The `ParameterizedJob` object supports the following
attributes:
* `MetaOptional` - Specifies the set of metadata keys that may be provided
when dispatching against the job as a string array.
* `MetaRequired` - Specifies the set of metadata keys that must be provided
when dispatching against the job as a string array.
* `Payload` - Specifies the requirement of providing a payload when
dispatching against the parameterized job. The options for this field are
"optional", "required" and "forbidden". The default value is "optional".
* `Payload` - The payload may not be set when submitting a job but may appear in
a dispatched job. The `Payload` will be a base64-encoded string containing the
payload that the job was dispatched with. The `Payload` has a **maximum size
of 16 KiB**.
* `Priority` - Specifies the job priority which is used to prioritize
scheduling and access to resources. Must be between 1 and 100 inclusively,
and defaults to 50.
@@ -295,6 +327,12 @@ The `Task` object supports the following keys:
* `Constraints` - This is a list of `Constraint` objects. See the constraint
reference for more details.
- `DispatchPayload` - Configures the task to have access to dispatch payloads.
The `DispatchPayload` object supports the following attributes:
* `File` - Specifies the file name to write the content of dispatch payload
to. The file is written relative to the task's local directory.
* `Driver` - Specifies the task driver that should be used to run the
task. See the [driver documentation](/docs/drivers/index.html) for what
is available. Examples include `docker`, `qemu`, `java`, and `exec`.

View File

@@ -51,15 +51,15 @@ before the starting the task.
default value is to place the binary in `local/`. The destination is treated
as a directory and source files will be downloaded into that directory path.
- `source` `(string: <required>)` - Specifies the URL of the artifact to download.
Only `http`, `https`, and `s3` URLs are supported. See [`go-getter`][go-getter]
for details.
- `options` `(map<string|string>: nil)` - Specifies configuration parameters to
fetch the artifact. The key-value pairs map directly to parameters appended to
the supplied `source` URL. Please see the [`go-getter`
documentation][go-getter] for a complete list of options and examples.
- `source` `(string: <required>)` - Specifies the URL of the artifact to download.
Only `http`, `https`, and `s3` URLs are supported. See [`go-getter`][go-getter]
for details.
## `artifact` Examples
The following examples only show the `artifact` stanzas. Remember that the

View File

@@ -0,0 +1,62 @@
---
layout: "docs"
page_title: "dispatch_payload Stanza - Job Specification"
sidebar_current: "docs-job-specification-dispatch-payload"
description: |-
The "dispatch_payload" stanza allows a task to access dispatch payloads.
---
# `dispatch_payload` Stanza
<table class="table table-bordered table-striped">
<tr>
<th width="120">Placement</th>
<td>
<code>job -> group -> task -> **dispatch_payload**</code>
</td>
</tr>
</table>
The `dispatch_payload` stanza is used in conjunction with a [`parameterized`][parameterized] job
that expects a payload. When the job is dispatched with a payload, the payload
will be made available to any task that has a `dispatch_payload` stanza. The
payload will be written to the configured file before the task is started. This
allows the task to use the payload as input or configuration.
```hcl
job "docs" {
group "example" {
task "server" {
dispatch_payload {
file = "config.json"
}
}
}
}
```
## `dispatch_payload` Parameters
- `file` `(string: "")` - Specifies the file name to write the content of
dispatch payload to. The file is written relative to the [task's local
directory][localdir].
## `dispatch_payload` Examples
The following examples only show the `dispatch_payload` stanzas. Remember that the
`dispatch_payload` stanza is only valid in the placements listed above.
### Write Payload to a File
This example shows a `dispatch_payload` block in a parameterized job that writes
the payload to a `config.json` file.
```hcl
dispatch_payload {
file = "config.json"
}
```
[localdir]: /docs/runtime/environment.html#local_ "Task Local Directory"
[parameterized]: /docs/job-specification/parameterized.html "Nomad parameterized Job Specification"

View File

@@ -42,6 +42,10 @@ job "docs" {
"my-key" = "my-value"
}
parameterized {
# ...
}
periodic {
# ...
}
@@ -74,13 +78,16 @@ job "docs" {
- `datacenters` `(array<string>: <required>)` - A list of datacenters in the region which are eligible
for task placement. This must be provided, and does not have a default.
- `group` <code>([Group][group]: <required>)</code> - Specifies the start of a
- `group` <code>([Group][group]: \<required\>)</code> - Specifies the start of a
group of tasks. This can be provided multiple times to define additional
groups. Group names must be unique within the job file.
- `meta` <code>([Meta][]: nil)</code> - Specifies a key-value map that annotates
the job with user-defined metadata.
- `parameterized` <code>([Parameterized][parameterized]: nil)</code> - Specifies
the job as a parameterized job such that it can be dispatched against.
- `periodic` <code>([Periodic][]: nil)</code> - Allows the job to be scheduled
at fixed times, dates or intervals.
@@ -215,6 +222,7 @@ $ VAULT_TOKEN="..." nomad run example.nomad
[constraint]: /docs/job-specification/constraint.html "Nomad constraint Job Specification"
[group]: /docs/job-specification/group.html "Nomad group Job Specification"
[meta]: /docs/job-specification/meta.html "Nomad meta Job Specification"
[parameterized]: /docs/job-specification/parameterized.html "Nomad parameterized Job Specification"
[periodic]: /docs/job-specification/periodic.html "Nomad periodic Job Specification"
[task]: /docs/job-specification/task.html "Nomad task Job Specification"
[update]: /docs/job-specification/update.html "Nomad update Job Specification"

View File

@@ -0,0 +1,161 @@
---
layout: "docs"
page_title: "parameterized Stanza - Job Specification"
sidebar_current: "docs-job-specification-parameterized"
description: |-
A parameterized job is used to encapsulate a set of work that can be carried
out on various inputs much like a function definition. When the
`parameterized` stanza is added to a job, the job acts as a function to the
cluster as a whole.
---
# `parameterized` Stanza
<table class="table table-bordered table-striped">
<tr>
<th width="120">Placement</th>
<td>
<code>job -> **parameterized**</code>
</td>
</tr>
</table>
A parameterized job is used to encapsulate a set of work that can be carried out
on various inputs much like a function definition. When the `parameterized`
stanza is added to a job, the job acts as a function to the cluster as a whole.
The `parameterized` stanza allows job operators to configure a job that carries
out a particular action, define its resource requirements, and configure how
inputs and configuration are retrieved by the tasks within the job.
To invoke a parameterized job, the [`nomad job dispatch`][dispatch command]
command or the equivalent HTTP APIs are used. When dispatching against a
parameterized job, an opaque payload and metadata may be injected into the job.
These inputs to the parameterized job act like arguments to a function. The job
consumes them to change its behavior, without exposing the implementation
details to the caller.
To that end, tasks within the job can add a
[`dispatch_payload`][dispatch_payload] stanza that
defines where on the filesystem this payload gets written to. An example payload
would be a task's JSON configuration.
Further, certain metadata may be marked as required when dispatching a job so it
can be used to inject configuration directly into a task's arguments using
[interpolation]. An example of this would be to require a run ID key that
could be used to look up the work the job is supposed to do from a management
service or database.
Each time a job is dispatched, a unique job ID is generated. This allows a
caller to track the status of the job, much like a future or promise in some
programming languages.
```hcl
job "docs" {
parameterized {
payload = "required"
meta_required = ["dispatcher_email"]
meta_optional = ["pager_email"]
}
}
```
## `parameterized` Requirements
- The job's [scheduler type][batch-type] must be `batch`.
## `parameterized` Parameters
- `meta_optional` `(array<string>: nil)` - Specifies the set of metadata keys that
may be provided when dispatching against the job.
- `meta_required` `(array<string>: nil)` - Specifies the set of metadata keys that
must be provided when dispatching against the job.
- `payload` `(string: "optional")` - Specifies the requirement of providing a
payload when dispatching against the parameterized job. The **maximum size of
a `payload` is 16 KiB**. The options for this field are:
- `"optional"` - A payload is optional when dispatching against the job.
- `"required"` - A payload must be provided when dispatching against the job.
- `"forbidden"` - A payload is forbidden when dispatching against the job.
## `parameterized` Examples
The following examples show non-runnable example parameterized jobs:
### Required Inputs
This example shows a parameterized job that requires both a payload and
metadata:
```hcl
job "video-encode" {
...
type = "batch"
parameterized {
payload = "required"
meta_required = ["dispatcher_email"]
}
group "encode" {
...
task "ffmpeg" {
driver = "exec"
config {
command = "ffmpeg-wrapper"
# When dispatched, the payload is written to a file that is then read by
# the created task upon startup
args = ["-config=${NOMAD_TASK_DIR}/config.json"]
}
dispatch_payload {
file = "config.json"
}
}
}
}
```
### Metadata Interpolation
```hcl
job "email-blast" {
...
type = "batch"
parameterized {
payload = "forbidden"
meta_required = ["CAMPAIGN_ID"]
}
group "emails" {
...
task "emailer" {
driver = "exec"
config {
command = "emailer"
# The campaign ID is interpolated and injected into the task's
# arguments
args = ["-campaign=${NOMAD_META_CAMPAIGN_ID}"]
}
}
}
}
```
[batch-type]: /docs/job-specification/job.html#type "Batch scheduler type"
[dispatch command]: /docs/commands/job-dispatch.html "Nomad Job Dispatch Command"
[resources]: /docs/job-specification/resources.html "Nomad resources Job Specification"
[interpolation]: /docs/runtime/interpolation.html "Nomad Runtime Interpolation"
[dispatch_payload]: /docs/job-specification/dispatch_payload.html "Nomad dispatch_payload Job Specification"

View File

@@ -35,6 +35,10 @@ job "docs" {
The periodic expression is always evaluated in the **UTC timezone** to ensure
consistent evaluation when Nomad spans multiple time zones.
## `periodic` Requirements
- The job's [scheduler type][batch-type] must be `batch`.
## `periodic` Parameters
- `cron` `(string: <required>)` - Specifies a cron expression configuring the
@@ -60,4 +64,5 @@ periodic {
}
```
[batch-type]: /docs/job-specification/job.html#type "Batch scheduler type"
[cron]: https://github.com/gorhill/cronexpr#implementation "List of cron expressions"

View File

@@ -37,6 +37,9 @@ job "docs" {
constraints on the task. This can be provided multiple times to define
additional constraints.
- `dispatch_payload` <code>([DispatchPayload][]: nil)</code> - Configures the
task to have access to dispatch payloads.
- `driver` - Specifies the task driver that should be used to run the
task. See the [driver documentation](/docs/drivers/index.html) for what
is available. Examples include `docker`, `qemu`, `java`, and `exec`.
@@ -163,6 +166,7 @@ task "server" {
[artifact]: /docs/job-specification/artifact.html "Nomad artifact Job Specification"
[consul]: https://www.consul.io/ "Consul by HashiCorp"
[constraint]: /docs/job-specification/constraint.html "Nomad constraint Job Specification"
[dispatchpayload]: /docs/job-specification/dispatch_payload.html "Nomad dispatch_payload Job Specification"
[env]: /docs/job-specification/env.html "Nomad env Job Specification"
[meta]: /docs/job-specification/meta.html "Nomad meta Job Specification"
[resources]: /docs/job-specification/resources.html "Nomad resources Job Specification"

View File

@@ -47,19 +47,17 @@ README][ct].
## `template` Parameters
- `source` `(string: "")` - Specifies the path to the template to be rendered.
One of `source` or `data` must be specified, but not both. This source can
optionally be fetched using an [`artifact`][artifact] resource. This template
must exist on the machine prior to starting the task; it is not possible to
reference a template inside of a Docker container, for example.
- `destination` `(string: <required>)` - Specifies the location where the
resulting template should be rendered, relative to the task directory.
- `change_signal` `(string: "")` - Specifies the signal to send to the task as a
string like `"SIGUSR1"` or `"SIGINT"`. This option is required if the
`change_mode` is `signal`.
- `data` `(string: "")` - Specifies the raw template to execute. One of `source`
or `data` must be specified, but not both. This is useful for smaller
templates, but we recommend using `source` for larger templates.
- `destination` `(string: <required>)` - Specifies the location where the
resulting template should be rendered, relative to the task directory.
- `change_mode` `(string: "restart")` - Specifies the behavior Nomad should take
if the rendered template changes. The possible values are:
@@ -67,9 +65,11 @@ README][ct].
- `"restart"` - restart the task
- `"signal"` - send a configurable signal to the task
- `change_signal` `(string: "")` - Specifies the signal to send to the task as a
string like `"SIGUSR1"` or `"SIGINT"`. This option is required if the
`change_mode` is `signal`.
- `source` `(string: "")` - Specifies the path to the template to be rendered.
One of `source` or `data` must be specified, but not both. This source can
optionally be fetched using an [`artifact`][artifact] resource. This template
must exist on the machine prior to starting the task; it is not possible to
reference a template inside of a Docker container, for example.
- `splay` `(string: "5s")` - Specifies a random amount of time to wait between
0ms and the given splay value before invoking the change mode. This is

View File

@@ -13,7 +13,7 @@ Many workloads require access to tokens, passwords, certificates, API keys, and
other secrets. To enable secure, auditable and easy access to your secrets,
Nomad integrates with HashiCorp's [Vault][]. Nomad servers and clients
coordinate with Vault to derive a Vault token that has access to only the Vault
policies the tasks needs. Nomad clients make the token avaliable to the task and
policies the task needs. Nomad clients make the token available to the task and
handle the token's renewal. Further, Nomad's [`template` block][template] can
retrieve secrets from Vault making it easier than ever to secure your
infrastructure.
@@ -24,142 +24,215 @@ install Vault separately from Nomad. Nomad does not run Vault for you.
## Vault Configuration
To use the Vault integration, Nomad servers must be provided a Vault token. This
token can either be a root token or a token from a role. The root token is the
easiest way to get started, but we recommend a role-based token for production
installations. Nomad servers will renew the token automatically.
token can either be a root token or a periodic token with permissions to create
tokens from a token role. The root token is the easiest way to get started, but
we recommend a token created from a token role for production installations.
Nomad servers will renew the token automatically.
### Root Token
### Root Token Integration
If Nomad is given a [root
token](https://www.vaultproject.io/docs/concepts/tokens.html#root-tokens), no
further configuration is needed as Nomad can derive a token for jobs using any
Vault policies.
### Role based Token
### Token Role based Integration
Vault's [Token Authentication Backend][auth] supports a concept called "roles".
Roles allow policies to be grouped together and token creation to be delegated
to a trusted service such as Nomad. By creating a role, the set of policies that
tasks managed by Nomad can access may be limited compared to giving Nomad a root
token.
Token roles allow policies to be grouped together and token creation to be
delegated to a trusted service such as Nomad. By creating a token role, the set
of policies that tasks managed by Nomad can access may be limited compared to
giving Nomad a root token. Token roles allow both whitelist and blacklist
management of policies accessible to the role.
When given a non-root token, Nomad queries the token to determine the role it
was generated from. It will then derive tokens for jobs based on that role.
Nomad expects the role to be created with several properties described below
when creating the role with the Vault endpoint `/auth/token/roles/<role_name>`:
To configure Nomad and Vault to create tokens against a role, the following must
occur:
1. Create a "nomad-server" policy used by Nomad to create and manage tokens.
2. Create a Vault token role with the configuration described below.
3. Configure Nomad to use the created token role.
4. Give Nomad servers a periodic token with the "nomad-server" policy created
above.
#### Required Vault Policies
The token Nomad receives must have the capabilities listed below. An explanation
for the use of each capability is given.
```
# Allow creating tokens under "nomad-cluster" token role. The token role name
# should be updated if "nomad-cluster" is not used.
path "auth/token/create/nomad-cluster" {
capabilities = ["update"]
}
# Allow looking up "nomad-cluster" token role. The token role name should be
# updated if "nomad-cluster" is not used.
path "auth/token/roles/nomad-cluster" {
capabilities = ["read"]
}
# Allow looking up incoming tokens to validate they have permissions to access
# the tokens they are requesting. This is only required if
# `allow_unauthenticated` is set to false.
path "auth/token/lookup" {
capabilities = ["update"]
}
# Allow revoking tokens that should no longer exist. This allows revoking
# tokens for dead tasks.
path "auth/token/revoke-accessor" {
capabilities = ["update"]
}
# Allow checking the capabilities of our own token. This is used to validate the
# token upon startup.
path "/sys/capabilities-self" {
capabilities = ["update"]
}
# Allow our own token to be renewed.
path "auth/token/renew-self" {
capabilities = ["update"]
}
```
The above [`nomad-server` policy](/data/vault/nomad-server-policy.hcl) is
available for download. Below is an example of writing this policy to Vault:
```
# Download the policy
$ curl https://nomadproject.io/data/vault/nomad-server-policy.hcl -O -s -L
# Write the policy to Vault
$ vault policy-write nomad-server nomad-server-policy.hcl
```
#### Vault Token Role Configuration
A Vault token role must be created for use by Nomad. The token role can be used
to manage what Vault policies are accessible by jobs submitted to Nomad. The
policies can be managed as a whitelist by using `allowed_policies` in the token
role definition or as a blacklist by using `disallowed_policies`.
If using `allowed_policies`, tasks may only request Vault policies that are in
the list. If `disallowed_policies` is used, tasks may request any policy that is
not in the `disallowed_policies` list. There are tradeoffs to both approaches,
but generally the blacklist approach is easier: add any policies that tasks
should not have access to into the `disallowed_policies` list.
An example token role definition is given below:
```json
{
"allowed_policies": "<comma-seperated list of policies>",
"disallowed_policies": "nomad-server",
"explicit_max_ttl": 0,
"name": "nomad",
"name": "nomad-cluster",
"orphan": false,
"period": 259200,
"renewable": true
}
```
#### Parameters:
##### Token Role Requirements
Nomad checks that the token role has an appropriate configuration for use by the
cluster. The fields that are checked are documented below, along with
descriptions of the important fields. See Vault's [Token Authentication
Backend][auth] documentation for all possible fields and more complete
documentation.
* `allowed_policies` - Specifies the list of allowed policies as a
comma-seperated string This list should contain all policies that jobs running
under Nomad should have access to. Further, the list must contain one or more
policies that gives Nomad the following permissions:
comma-separated string. This list should contain all policies that jobs running
under Nomad should have access to.
```hcl
# Allow creating tokens under the role
path "auth/token/create/nomad-server" {
capabilities = ["update"]
}
* `disallowed_policies` - Specifies the list of disallowed policies as a
comma-separated string. This list should contain all policies that jobs running
under Nomad should **not** have access to. The policy created above that
grants Nomad the ability to generate tokens from the token role should be
included in the list of disallowed policies. This prevents tokens created by
Nomad from generating new tokens with different policies than those granted
by Nomad.
# Allow looking up the role
path "auth/token/roles/nomad-server" {
capabilities = ["read"]
}
A regression occurred in Vault 0.6.4 when validating token creation using a
token role with `disallowed_policies`, making it unusable with Nomad. This
will be remedied in 0.6.5 and does not affect earlier versions of Vault.
# Allow looking up incoming tokens to validate they have permissions to
# access the tokens they are requesting
path "auth/token/lookup/*" {
capabilities = ["read"]
}
# Allow revoking tokens that should no longer exist
path "/auth/token/revoke-accessor/*" {
capabilities = ["update"]
}
```
* `explicit_max_ttl` - Specifies the max TTL of a token. Must be set to `0` to
* `explicit_max_ttl` - Specifies the max TTL of a token. **Must be set to `0`** to
allow periodic tokens.
* `name` - Specifies the name of the policy. We recommend using the name
`nomad-server`. If a different name is chosen, replace the role in the above
policy.
`nomad-cluster`. If a different name is chosen, replace the token role in the
above policy.
* `orphan` - Specifies whether tokens created againsts this role will be
orphaned and have no parents. Must be set to `false`. This ensures that the
* `orphan` - Specifies whether tokens created against this token role will be
orphaned and have no parents. **Must be set to `false`**. This ensures that the
token can be revoked when the task is no longer needed or a node dies.
* `period` - Specifies the length the TTL is extended by each renewal in
seconds. It is suggested to set this value on the order of magnitude of 3 days
(259200 seconds) to avoid a large renewal request rate to Vault. Must be set
to a positive value.
(259200 seconds) to avoid a large renewal request rate to Vault. **Must be set
to a positive value**.
* `renewable` - Specifies whether created tokens are renewable. Must be set to
`true`. This allows Nomad to renew tokens for tasks.
* `renewable` - Specifies whether created tokens are renewable. **Must be set to
`true`**. This allows Nomad to renew tokens for tasks.
The above [`nomad-cluster` token role](/data/vault/nomad-cluster-role.json) is
available for download. Below is an example of writing this role to Vault:
```
# Download the token role
$ curl https://nomadproject.io/data/vault/nomad-cluster-role.json -O -s -L
# Create the token role with Vault
$ vault write /auth/token/roles/nomad-cluster @nomad-cluster-role.json
```
See Vault's [Token Authentication Backend][auth] documentation for all possible
fields and more complete documentation.
#### Example Configuration
To make getting started easy, the basic [`nomad-server`
policy](/data/vault/nomad-server-policy.hcl) and
[role](/data/vault/nomad-server-role.json) described above are available for
download.
[`nomad-cluster` role](/data/vault/nomad-cluster-role.json) described above are
available for download.
The below example assumes Vault is accessible, unsealed and the the operator has
The below example assumes Vault is accessible, unsealed and the operator has
appropriate permissions.
```shell
# Download the policy and role
# Download the policy and token role
$ curl https://nomadproject.io/data/vault/nomad-server-policy.hcl -O -s -L
$ curl https://nomadproject.io/data/vault/nomad-server-role.json -O -s -L
$ curl https://nomadproject.io/data/vault/nomad-cluster-role.json -O -s -L
# Write the policy to Vault
$ vault policy-write nomad-server nomad-server-policy.hcl
# Edit the role to add any policies that you would like to be accessible to
# Nomad jobs in the list of allowed_policies. Do not remove `nomad-server`.
$ editor nomad-server-role.json
# Create the role with Vault
$ vault write /auth/token/roles/nomad-server @nomad-server-role.json
# Create the token role with Vault
$ vault write /auth/token/roles/nomad-cluster @nomad-cluster-role.json
```
#### Retrieving the Role based Token
#### Retrieving the Token Role based Token
After the role is created, a token suitable for the Nomad servers may be
After the token role is created, a token suitable for the Nomad servers may be
retrieved by issuing the following Vault command:
```
$ vault token-create -role nomad-server
$ vault token-create -policy nomad-server -period 72h
Key Value
--- -----
token f02f01c2-c0d1-7cb7-6b88-8a14fada58c0
token_accessor 8cb7fcb3-9a4f-6fbf-0efc-83092bb0cb1c
token_duration 259200s
token_renewable true
token_policies [<policies>]
token_policies [default nomad-server]
```
The token can then be set in the server configuration's [vault block][config],
as a command-line flag, or via an environment variable.
```
$ nomad agent -config /path/to/config -vault-token=f02f01c2-c0d1-7cb7-6b88-8a14fada58c0
```
```
$ VAULT_TOKEN=f02f01c2-c0d1-7cb7-6b88-8a14fada58c0 nomad agent -config /path/to/config
```
@@ -177,18 +250,17 @@ specification documentation][vault-spec].
## Troubleshooting
Upon startup, Nomad will attempt to connect to the specified Vault server. Nomad
will lookup the passed token and if the token is from a role, the role will be
validated. Nomad will not shutdown if given an invalid Vault token, but will log
the reasons the token is invalid and disable Vault integration.
will look up the passed token and, if the token is from a token role, the token
role will be validated. Nomad will not shut down if given an invalid Vault
token, but will log the reasons the token is invalid and disable Vault
integration.
## Assumptions
- Vault 0.6.2 or later is needed.
- Nomad is given either a root token or a token created from an appropriate role.
[auth]: https://www.vaultproject.io/docs/auth/token.html "Vault Authentication Backend"
[config]: /docs/agent/configuration/vault.html "Nomad Vault configuration block"
[config]: /docs/agent/configuration/vault.html "Nomad Vault Configuration Block"
[createfromrole]: /docs/agent/configuration/vault.html#create_from_role "Nomad vault create_from_role Configuration Flag"
[template]: /docs/job-specification/template.html "Nomad template Job Specification"
[vault]: https://www.vaultproject.io/ "Vault by HashiCorp"
[vault-spec]: /docs/job-specification/vault.html "Nomad Vault Job Specification"

View File

@@ -15,7 +15,19 @@
<%= javascript_include_tag "ie-compat" %>
<![endif]-->
<!-- Google Tag Manager -->
<script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-NR2SD7C');</script>
<!-- End Google Tag Manager -->
<%= yield_content :head %>
</head>
<body id="<%= body_id_for(current_page) %>" class="<%= body_classes_for(current_page) %>">
<!-- Google Tag Manager (noscript) -->
<noscript><iframe src="https://www.googletagmanager.com/ns.html?id=GTM-NR2SD7C"
height="0" width="0" style="display:none;visibility:hidden"></iframe></noscript>
<!-- End Google Tag Manager (noscript) -->

View File

@@ -48,6 +48,9 @@
<li<%= sidebar_current("docs-job-specification-constraint")%>>
<a href="/docs/job-specification/constraint.html">constraint</a>
</li>
<li<%= sidebar_current("docs-job-specification-dispatch-payload")%>>
<a href="/docs/job-specification/dispatch_payload.html">dispatch_payload</a>
</li>
<li<%= sidebar_current("docs-job-specification-env")%>>
<a href="/docs/job-specification/env.html">env</a>
</li>
@@ -69,6 +72,9 @@
<li<%= sidebar_current("docs-job-specification-network")%>>
<a href="/docs/job-specification/network.html">network</a>
</li>
<li<%= sidebar_current("docs-job-specification-parameterized")%>>
<a href="/docs/job-specification/parameterized.html">parameterized</a>
</li>
<li<%= sidebar_current("docs-job-specification-periodic")%>>
<a href="/docs/job-specification/periodic.html">periodic</a>
</li>
@@ -204,6 +210,9 @@
<li<%= sidebar_current("docs-commands-inspect") %>>
<a href="/docs/commands/inspect.html">inspect</a>
</li>
<li<%= sidebar_current("docs-commands-job-dispatch") %>>
<a href="/docs/commands/job-dispatch.html">job dispatch</a>
</li>
<li<%= sidebar_current("docs-commands-keygen") %>>
<a href="/docs/commands/keygen.html">keygen</a>
</li>