Merge branch 'master' into b-cv-add-job-anti-affinity-sentence

Charlie Voiselle
2017-08-01 12:35:33 -04:00
19 changed files with 296 additions and 203 deletions

View File

@@ -1,5 +1,15 @@
## 0.6.1 (Unreleased)
IMPROVEMENTS:
* driver/rkt: support read-only volume mounts [GH-2883]
BUG FIXES:
* core: Fix incorrect destructive update with `distinct_property` constraint
[GH-2939]
* cli: Fix autocompleting global flags [GH-2928]
* cli: Fix panic when using 0.6.0 cli with an older cluster [GH-2929]
* driver/docker: Fix leaking plugin file used by syslog server [GH-2937]
## 0.6.0 (July 26, 2017)
__BACKWARDS INCOMPATIBILITIES:__

View File

@@ -1,7 +1,6 @@
PACKAGES = $(shell go list ./... | grep -v '/vendor/')
EXTERNAL_TOOLS=\
github.com/kardianos/govendor \
github.com/mitchellh/gox \
golang.org/x/tools/cmd/cover \
github.com/axw/gocov/gocov \
gopkg.in/matm/v1/gocov-html \

View File

@@ -1,7 +1,8 @@
Nomad [![Build Status](https://travis-ci.org/hashicorp/nomad.svg)](https://travis-ci.org/hashicorp/nomad) [![Join the chat at https://gitter.im/hashicorp-nomad/Lobby](https://badges.gitter.im/hashicorp-nomad/Lobby.svg)](https://gitter.im/hashicorp-nomad/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
=========
- Website: https://www.nomadproject.io
- Mailing list: [Google Groups](https://groups.google.com/group/nomad-tool)
* Website: [www.nomadproject.io](https://www.nomadproject.io)
* Mailing list: [Google Groups](https://groups.google.com/group/nomad-tool)
<p align="center" style="text-align:center;">
<img src="https://cdn.rawgit.com/hashicorp/nomad/master/website/source/assets/images/logo-text.svg" width="500" />
@@ -78,7 +79,7 @@ needed dependencies.
**Developing locally**
For local dev first make sure Go is properly installed, including setting up a
[GOPATH](https://golang.org/doc/code.html#GOPATH). After setting up Go, clone this
[GOPATH](https://golang.org/doc/code.html#GOPATH). After setting up Go, clone this
repository into `$GOPATH/src/github.com/hashicorp/nomad`. Then you can
download the required build tools such as vet, cover, godep etc by bootstrapping
your environment.

View File

@@ -37,11 +37,17 @@ func (s *SyslogServer) Start() {
for {
select {
case <-s.doneCh:
s.listener.Close()
return
default:
connection, err := s.listener.Accept()
if err != nil {
s.doneLock.Lock()
done := s.done
s.doneLock.Unlock()
if done {
return
}
s.logger.Printf("[ERR] logcollector.server: error in accepting connection: %v", err)
continue
}
@@ -74,11 +80,12 @@ func (s *SyslogServer) read(connection net.Conn) {
// Shutdown shuts down the syslog server
func (s *SyslogServer) Shutdown() {
s.doneLock.Lock()
s.doneLock.Unlock()
defer s.doneLock.Unlock()
if !s.done {
close(s.doneCh)
close(s.messages)
s.done = true
s.listener.Close()
}
}
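Read together, the two hunks above are one fix: `Shutdown` now holds `doneLock` for the whole critical section via `defer`, and the accept loop checks the `done` flag so a deliberate close is not reported as an error. A condensed, self-contained sketch of the same pattern (names simplified, not the actual Nomad code):

```go
package main

import (
	"log"
	"net"
	"sync"
	"time"
)

// server condenses the syslog server's shutdown handling: a done flag guarded
// by a mutex lets the accept loop tell a deliberate close apart from an error.
type server struct {
	listener net.Listener
	doneCh   chan struct{}
	done     bool
	doneLock sync.Mutex
}

func (s *server) start() {
	for {
		conn, err := s.listener.Accept()
		if err != nil {
			s.doneLock.Lock()
			done := s.done
			s.doneLock.Unlock()
			if done {
				return // shutdown closed the listener; exit quietly
			}
			log.Printf("[ERR] error accepting connection: %v", err)
			continue
		}
		conn.Close() // a real server would hand the connection off here
	}
}

func (s *server) shutdown() {
	s.doneLock.Lock()
	defer s.doneLock.Unlock() // the fix: hold the lock for the whole section
	if !s.done {
		s.done = true
		close(s.doneCh)
		s.listener.Close()
	}
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	s := &server{listener: l, doneCh: make(chan struct{})}
	go s.start()
	time.Sleep(50 * time.Millisecond)
	s.shutdown()
	<-s.doneCh
}
```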

View File

@@ -80,7 +80,7 @@ type RktDriverConfig struct {
Net []string `mapstructure:"net"` // Networks for the containers
PortMapRaw []map[string]string `mapstructure:"port_map"` //
PortMap map[string]string `mapstructure:"-"` // A map of host port and the port name defined in the image manifest file
Volumes []string `mapstructure:"volumes"` // Host-Volumes to mount in, syntax: /path/to/host/directory:/destination/path/in/container
Volumes []string `mapstructure:"volumes"` // Host-Volumes to mount in, syntax: /path/to/host/directory:/destination/path/in/container[:readOnly]
InsecureOptions []string `mapstructure:"insecure_options"` // list of args for --insecure-options
NoOverlay bool `mapstructure:"no_overlay"` // disable overlayfs for rkt run
@@ -319,11 +319,22 @@ func (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse,
}
for i, rawvol := range driverConfig.Volumes {
parts := strings.Split(rawvol, ":")
if len(parts) != 2 {
readOnly := "false"
// job spec:
// volumes = ["/host/path:/container/path[:readOnly]"]
// the third parameter is optional, mount is read-write by default
if len(parts) == 3 {
if parts[2] == "readOnly" {
d.logger.Printf("[DEBUG] Mounting %s:%s as readOnly", parts[0], parts[1])
readOnly = "true"
} else {
d.logger.Printf("[WARN] Unknown volume parameter '%s' ignored for mount %s", parts[2], parts[0])
}
} else if len(parts) != 2 {
return nil, fmt.Errorf("invalid rkt volume: %q", rawvol)
}
volName := fmt.Sprintf("%s-%s-%d", d.DriverContext.allocID, sanitizedName, i)
cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", volName, parts[0]))
cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s,readOnly=%s", volName, parts[0], readOnly))
cmdArgs = append(cmdArgs, fmt.Sprintf("--mount=volume=%s,target=%s", volName, parts[1]))
}
}

View File

@@ -2,6 +2,7 @@ package agent
import (
"bytes"
"github.com/hashicorp/go-syslog"
"github.com/hashicorp/logutils"
)

View File

@@ -199,7 +199,7 @@ func formatAllocBasicInfo(alloc *api.Allocation, client *api.Client, uuidLength
fmt.Sprintf("Name|%s", alloc.Name),
fmt.Sprintf("Node ID|%s", limit(alloc.NodeID, uuidLength)),
fmt.Sprintf("Job ID|%s", alloc.JobID),
fmt.Sprintf("Job Version|%d", *alloc.Job.Version),
fmt.Sprintf("Job Version|%d", getVersion(alloc.Job)),
fmt.Sprintf("Client Status|%s", alloc.ClientStatus),
fmt.Sprintf("Client Description|%s", alloc.ClientDescription),
fmt.Sprintf("Desired Status|%s", alloc.DesiredStatus),

View File

@@ -292,3 +292,29 @@ func (j *JobGetter) ApiJob(jpath string) (*api.Job, error) {
return jobStruct, nil
}
// COMPAT: Remove in 0.7.0
// Nomad 0.6.0 introduced the submit time field, so CLIs interacting with
// older versions of Nomad would SEGFAULT as reported here:
// https://github.com/hashicorp/nomad/issues/2918
// getSubmitTime returns the submit time of the job, converted to time.Time
func getSubmitTime(job *api.Job) time.Time {
if job.SubmitTime != nil {
return time.Unix(0, *job.SubmitTime)
}
return time.Time{}
}
// COMPAT: Remove in 0.7.0
// Nomad 0.6.0 introduced job Versions, so CLIs interacting with
// older versions of Nomad would SEGFAULT as reported here:
// https://github.com/hashicorp/nomad/issues/2918
// getVersion safely returns the version of the job.
func getVersion(job *api.Job) uint64 {
if job.Version != nil {
return *job.Version
}
return 0
}
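These helpers exist because `api.Job` fields such as `Version` and `SubmitTime` are pointers that pre-0.6.0 servers never populate; dereferencing them directly panics. A minimal standalone sketch of the same nil-guard pattern, using a hypothetical `job` struct in place of `api.Job`:

```go
package main

import (
	"fmt"
	"time"
)

// job is a hypothetical stand-in for the two nullable api.Job fields that
// pre-0.6.0 servers never populate.
type job struct {
	Version    *uint64
	SubmitTime *int64
}

// getVersion mirrors the helper above: fall back to 0 instead of panicking.
func getVersion(j *job) uint64 {
	if j.Version != nil {
		return *j.Version
	}
	return 0
}

// getSubmitTime mirrors the helper above: fall back to the zero time.
func getSubmitTime(j *job) time.Time {
	if j.SubmitTime != nil {
		return time.Unix(0, *j.SubmitTime)
	}
	return time.Time{}
}

func main() {
	old := &job{} // a job returned by an older cluster: both fields are nil
	fmt.Println(getVersion(old))             // 0, instead of a nil-pointer panic
	fmt.Println(getSubmitTime(old).IsZero()) // true
}
```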

View File

@@ -138,11 +138,11 @@ func (c *StatusCommand) Run(args []string) int {
basic := []string{
fmt.Sprintf("ID|%s", *job.ID),
fmt.Sprintf("Name|%s", *job.Name),
fmt.Sprintf("Submit Date|%s", formatTime(time.Unix(0, *job.SubmitTime))),
fmt.Sprintf("Submit Date|%s", formatTime(getSubmitTime(job))),
fmt.Sprintf("Type|%s", *job.Type),
fmt.Sprintf("Priority|%d", *job.Priority),
fmt.Sprintf("Datacenters|%s", strings.Join(job.Datacenters, ",")),
fmt.Sprintf("Status|%s", getStatusString(*job.Status, *job.Stop)),
fmt.Sprintf("Status|%s", getStatusString(*job.Status, job.Stop)),
fmt.Sprintf("Periodic|%v", periodic),
fmt.Sprintf("Parameterized|%v", parameterized),
}
@@ -421,7 +421,7 @@ func formatAllocList(allocations []*api.Allocation, verbose bool, uuidLength int
limit(alloc.EvalID, uuidLength),
limit(alloc.NodeID, uuidLength),
alloc.TaskGroup,
*alloc.Job.Version,
getVersion(alloc.Job),
alloc.DesiredStatus,
alloc.ClientStatus,
formatUnixNanoTime(alloc.CreateTime))
@@ -433,7 +433,7 @@ func formatAllocList(allocations []*api.Allocation, verbose bool, uuidLength int
limit(alloc.ID, uuidLength),
limit(alloc.NodeID, uuidLength),
alloc.TaskGroup,
*alloc.Job.Version,
getVersion(alloc.Job),
alloc.DesiredStatus,
alloc.ClientStatus,
formatUnixNanoTime(alloc.CreateTime))
@@ -534,7 +534,7 @@ func createStatusListOutput(jobs []*api.JobListStub) string {
job.ID,
getTypeString(job),
job.Priority,
getStatusString(job.Status, job.Stop),
getStatusString(job.Status, &job.Stop),
formatTime(time.Unix(0, job.SubmitTime)))
}
return formatList(out)
@@ -554,8 +554,8 @@ func getTypeString(job *api.JobListStub) string {
return t
}
func getStatusString(status string, stop bool) string {
if stop {
func getStatusString(status string, stop *bool) string {
if stop != nil && *stop {
return fmt.Sprintf("%s (stopped)", status)
}
return status

View File

@@ -14,7 +14,7 @@ func (c *StopCommand) Help() string {
Usage: nomad stop [options] <job>
Stop an existing job. This command is used to signal allocations
to shut down for the given job ID. Upon successful deregistraion,
to shut down for the given job ID. Upon successful deregistration,
an interactive monitor session will start to display log lines as
the job unwinds its allocations and completes shutting down. It
is safe to exit the monitor early using ctrl+c.

View File

@@ -11,6 +11,7 @@ import (
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/assert"
)
func TestServiceSched_JobRegister(t *testing.T) {
@@ -493,6 +494,84 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) {
h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) {
h := NewHarness(t)
assert := assert.New(t)
// Create a job that uses distinct property over the node-id
job := mock.Job()
job.TaskGroups[0].Count = 3
job.TaskGroups[0].Constraints = append(job.TaskGroups[0].Constraints,
&structs.Constraint{
Operand: structs.ConstraintDistinctProperty,
LTarget: "${node.unique.id}",
})
assert.Nil(h.State.UpsertJob(h.NextIndex(), job), "UpsertJob")
// Create some nodes
var nodes []*structs.Node
for i := 0; i < 6; i++ {
node := mock.Node()
nodes = append(nodes, node)
assert.Nil(h.State.UpsertNode(h.NextIndex(), node), "UpsertNode")
}
// Create some allocations
var allocs []*structs.Allocation
for i := 0; i < 3; i++ {
alloc := mock.Alloc()
alloc.Job = job
alloc.JobID = job.ID
alloc.NodeID = nodes[i].ID
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
allocs = append(allocs, alloc)
}
assert.Nil(h.State.UpsertAllocs(h.NextIndex(), allocs), "UpsertAllocs")
// Update the count
job2 := job.Copy()
job2.TaskGroups[0].Count = 6
assert.Nil(h.State.UpsertJob(h.NextIndex(), job2), "UpsertJob")
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
JobID: job.ID,
}
// Process the evaluation
assert.Nil(h.Process(NewServiceScheduler, eval), "Process")
// Ensure a single plan
assert.Len(h.Plans, 1, "Number of plans")
plan := h.Plans[0]
// Ensure the plan doesn't have annotations.
assert.Nil(plan.Annotations, "Plan.Annotations")
// Ensure the eval hasn't spawned blocked eval
assert.Len(h.CreateEvals, 0, "Created Evals")
// Ensure the plan allocated
var planned []*structs.Allocation
for _, allocList := range plan.NodeAllocation {
planned = append(planned, allocList...)
}
assert.Len(planned, 6, "Planned Allocations")
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
assert.Nil(err, "AllocsByJob")
// Ensure all allocations placed
assert.Len(out, 6, "Placed Allocations")
h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
func TestServiceSched_JobRegister_Annotate(t *testing.T) {
h := NewHarness(t)

View File

@@ -56,6 +56,12 @@ func (p *propertySet) SetJobConstraint(constraint *structs.Constraint) {
// Store the constraint
p.constraint = constraint
p.populateExisting(constraint)
// Populate the proposed when setting the constraint. We do this because
// when detecting if we can inplace update an allocation we stage an
// eviction and then select. This means the plan has an eviction before a
// single select has finished.
p.PopulateProposed()
}
// SetTGConstraint is used to parameterize the property set for a
@@ -67,8 +73,13 @@ func (p *propertySet) SetTGConstraint(constraint *structs.Constraint, taskGroup
// Store the constraint
p.constraint = constraint
p.populateExisting(constraint)
// Populate the proposed when setting the constraint. We do this because
// when detecting if we can inplace update an allocation we stage an
// eviction and then select. This means the plan has an eviction before a
// single select has finished.
p.PopulateProposed()
}
// populateExisting is a helper shared when setting the constraint to populate

View File

@@ -18,6 +18,9 @@ cli is the library that powers the CLI for
* Optional support for default subcommands so `cli` does something
other than error.
* Support for shell autocompletion of subcommands, flags, and arguments
with callbacks in Go. You don't need to write any shell code.
* Automatic help generation for listing subcommands
* Automatic help flag recognition of `-h`, `--help`, etc.

View File

@@ -153,6 +153,14 @@ func (c *CLI) IsVersion() bool {
func (c *CLI) Run() (int, error) {
c.once.Do(c.init)
// If this is a autocompletion request, satisfy it. This must be called
// first before anything else since its possible to be autocompleting
// -help or -version or other flags and we want to show completions
// and not actually write the help or version.
if c.Autocomplete && c.autocomplete.Complete() {
return 0, nil
}
// Just show the version and exit if instructed.
if c.IsVersion() && c.Version != "" {
c.HelpWriter.Write([]byte(c.Version + "\n"))
@@ -197,11 +205,6 @@ func (c *CLI) Run() (int, error) {
return 0, nil
}
// If this is a autocompletion request, satisfy it
if c.autocomplete.Complete() {
return 0, nil
}
}
// Attempt to get the factory function for creating the command
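The README feature above and this reordering in `Run` are both about shell autocompletion: the completion check now happens before help and version handling so completions are returned instead of help text. A minimal sketch of a program opting in with this vendored `github.com/mitchellh/cli` (the command registration is a hypothetical placeholder):

```go
package main

import (
	"log"
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	c := cli.NewCLI("myapp", "0.1.0")
	c.Args = os.Args[1:]
	c.Autocomplete = true // ask Run to satisfy completion requests first
	c.Commands = map[string]cli.CommandFactory{
		// "status": statusCommandFactory, // hypothetical command registration
	}

	exitStatus, err := c.Run()
	if err != nil {
		log.Println(err)
	}
	os.Exit(exitStatus)
}
```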

vendor/vendor.json vendored
View File

@@ -976,10 +976,10 @@
"revision": "7e024ce8ce18b21b475ac6baf8fa3c42536bf2fa"
},
{
"checksumSHA1": "F9rKfF4/KI5jhVBBIMuf6eDnTN0=",
"checksumSHA1": "cwT95naFga0RFGUZsCT1NeX5ncI=",
"path": "github.com/mitchellh/cli",
"revision": "4796e5fef694378c14b647f7221591afa58e38cd",
"revisionTime": "2017-07-17T21:49:25Z"
"revision": "921cc83dadc195c0cd67f9df3a6ec822400a1df5",
"revisionTime": "2017-07-25T23:05:51Z"
},
{
"checksumSHA1": "ttEN1Aupb7xpPMkQLqb3tzLFdXs=",

View File

@@ -18,177 +18,104 @@ $ nomad run -output my-job.nomad
## Syntax
Below is an example of a JSON object that submits a `periodic` job to Nomad:
Below is the JSON representation of the job output by `$ nomad init`:
```json
{
"Job": {
"Region": "global",
"ID": "example",
"Name": "example",
"Type": "batch",
"Priority": 50,
"AllAtOnce": false,
"Datacenters": [
"dc1"
],
"Constraints": [
{
"LTarget": "${attr.kernel.name}",
"RTarget": "linux",
"Operand": "="
}
],
"TaskGroups": [
{
"Name": "cache",
"Count": 1,
"Constraints": null,
"Tasks": [
{
"Name": "redis",
"Driver": "docker",
"User": "foo-user",
"Config": {
"image": "redis:latest",
"port_map": [
{
"db": 6379
}
]
},
"Constraints": null,
"Env": {
"foo": "bar",
"baz": "pipe"
},
"Services": [
{
"Name": "cache-redis",
"Tags": [
"global",
"cache"
],
"PortLabel": "db",
"Checks": [
{
"Id": "",
"Name": "alive",
"Type": "tcp",
"Command": "",
"Args": null,
"Path": "",
"Protocol": "",
"Interval": 10000000000,
"Timeout": 2000000000
}
]
}
],
"Update": {
"Stagger": 10000000000,
"MaxParallel": 3,
"HealthCheck": "checks",
"MinHealthyTime": 15000000000,
"HealthyDeadline": 180000000000,
"AutoRevert": false,
"Canary": 1
},
"Vault": {
"Policies": [
"policy-name"
],
"Env": true,
"ChangeMode": "restart",
"ChangeSignal": ""
},
"Resources": {
"CPU": 500,
"MemoryMB": 256,
"IOPS": 0,
"Networks": [
{
"ReservedPorts": [
{
"Label": "rpc",
"Value": 25566
}
],
"DynamicPorts": [
{
"Label": "db"
}
],
"MBits": 10
}
]
},
"Meta": {
"foo": "bar",
"baz": "pipe"
},
"KillTimeout": 5000000000,
"LogConfig": {
"MaxFiles": 10,
"MaxFileSizeMB": 10
},
"Templates": [
{
"SourcePath": "local/config.conf.tpl",
"DestPath": "local/config.conf",
"EmbeddedTmpl": "",
"ChangeMode": "signal",
"ChangeSignal": "SIGUSR1",
"Splay": 5000000000
}
],
"Artifacts": [
{
"GetterSource": "http://foo.com/artifact.tar.gz",
"GetterOptions": {
"checksum": "md5:c4aa853ad2215426eb7d70a21922e794"
},
"RelativeDest": "local/"
}
],
"DispatchPayload": {
"File": "config.json"
}
}
"Job": {
"ID": "example",
"Name": "example",
"Type": "service",
"Priority": 50,
"Datacenters": [
"dc1"
],
"RestartPolicy": {
"Interval": 300000000000,
"Attempts": 10,
"Delay": 25000000000,
"Mode": "delay"
},
"Meta": {
"foo": "bar",
"baz": "pipe"
"TaskGroups": [{
"Name": "cache",
"Count": 1,
"Tasks": [{
"Name": "redis",
"Driver": "docker",
"User": "",
"Config": {
"image": "redis:3.2",
"port_map": [{
"db": 6379
}]
},
"Services": [{
"Id": "",
"Name": "global-redis-check",
"Tags": [
"global",
"cache"
],
"PortLabel": "db",
"AddressMode": "",
"Checks": [{
"Id": "",
"Name": "alive",
"Type": "tcp",
"Command": "",
"Args": null,
"Path": "",
"Protocol": "",
"PortLabel": "",
"Interval": 10000000000,
"Timeout": 2000000000,
"InitialStatus": "",
"TLSSkipVerify": false
}]
}],
"Resources": {
"CPU": 500,
"MemoryMB": 256,
"Networks": [{
"Device": "",
"CIDR": "",
"IP": "",
"MBits": 10,
"DynamicPorts": [{
"Label": "db",
"Value": 0
}]
}]
},
"Leader": false
}],
"RestartPolicy": {
"Interval": 300000000000,
"Attempts": 10,
"Delay": 25000000000,
"Mode": "delay"
},
"EphemeralDisk": {
"SizeMB": 300
}
}],
"Update": {
"MaxParallel": 1,
"MinHealthyTime": 10000000000,
"HealthyDeadline": 180000000000,
"AutoRevert": false,
"Canary": 0
}
}
],
"Periodic": {
"Enabled": true,
"Spec": "- *",
"SpecType": "cron",
"ProhibitOverlap": true
},
"Meta": {
"foo": "bar",
"baz": "pipe"
},
"ParameterizedJob": {
"Payload": "required",
"MetaRequired": [
"foo"
],
"MetaOptional": [
"bar"
]
},
"Payload": null
}
}
}
```
The example JSON could be submitted as a job using the following:
```text
$ curl -XPUT -d @example.json http://127.0.0.1:4646/v1/job/example
{
"EvalID": "5d6ded54-0b2a-8858-6583-be5f476dec9d",
"EvalCreateIndex": 12,
"JobModifyIndex": 11,
"Warnings": "",
"Index": 12,
"LastContact": 0,
"KnownLeader": false
}
```
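For comparison, the same submission can be made from Go using only the standard library; this sketch mirrors the `curl` call above and assumes the JSON has been saved as `example.json`:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Read the JSON document shown above from disk (the path is an assumption).
	payload, err := ioutil.ReadFile("example.json")
	if err != nil {
		panic(err)
	}

	// PUT the job to the HTTP API, just like the curl command above.
	req, err := http.NewRequest(http.MethodPut,
		"http://127.0.0.1:4646/v1/job/example", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body)) // e.g. {"EvalID": "...", "JobModifyIndex": ...}
}
```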

View File

@@ -103,12 +103,14 @@ The `rkt` driver supports the following configuration in the job spec:
* `no_overlay` - (Optional) When enabled, will use `--no-overlay=true` flag for 'rkt run'.
Useful when running jobs on older systems affected by https://github.com/rkt/rkt/issues/1922
* `volumes` - (Optional) A list of `host_path:container_path` strings to bind
* `volumes` - (Optional) A list of `host_path:container_path[:readOnly]` strings to bind
host paths to container paths.
Mounts are read-write by default; an optional third parameter `readOnly` can be provided
to make them read-only.
```hcl
config {
volumes = ["/path/on/host:/path/in/container"]
volumes = ["/path/on/host:/path/in/container", "/readonly/path/on/host:/path/in/container:readOnly"]
}
```

View File

@@ -41,8 +41,8 @@ job "docs" {
```
Nomad supports downloading `http`, `https`, `git`, `hg` and `S3` artifacts. If
these artifacts are archived (`zip`, `tgz`, `bz2`), they are automatically
unarchived before the starting the task.
these artifacts are archived (`zip`, `tgz`, `bz2`, `xz`), they are
automatically unarchived before starting the task.
## `artifact` Parameters

View File

@@ -62,7 +62,7 @@ occur:
The token Nomad receives must have the capabilities listed below. An explanation
for the use of each capability is given.
```
```hcl
# Allow creating tokens under "nomad-cluster" token role. The token role name
# should be updated if "nomad-cluster" is not used.
path "auth/token/create/nomad-cluster" {
@@ -236,12 +236,24 @@ token_renewable true
token_policies [default nomad-server]
```
`-orphan` is included above to prevent revocation of the token when its parent expires.
See the [Vault token hierarchy](https://www.vaultproject.io/docs/concepts/tokens.html#token-hierarchies-and-orphan-tokens)
documentation for more information.
The `-orphan` flag is included when generating the Nomad server token above to
prevent revocation of the token when its parent expires. Vault typically
creates tokens with a parent-child relationship. When an ancestor token is
revoked, all of its descendant tokens and their associated leases are revoked
as well.
The token can then be set in the server configuration's [vault block][config],
as a command-line flag, or via an environment variable.
When generating Nomad's Vault token, we need to ensure that revocation of the
parent token does not revoke Nomad's token. To prevent this behavior, we
specify the `-orphan` flag when we create Nomad's Vault token. All
other tokens generated by Nomad for jobs will be generated using the policy
default of `orphan = false`.
More information about creating orphan tokens can be found in
[Vault's Token Hierarchies and Orphan Tokens documentation][tokenhierarchy].
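To make the parent-child behavior concrete, below is a sketch that creates such an orphan token with Vault's Go API client. The `nomad-server` policy name comes from the output above; the address, token handling, and 72h period are assumptions, not part of this guide:

```go
package main

import (
	"fmt"
	"log"
	"os"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	cfg := vault.DefaultConfig()
	cfg.Address = "http://127.0.0.1:8200" // assumption: a local dev Vault
	client, err := vault.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken(os.Getenv("VAULT_TOKEN"))

	// CreateOrphan issues a token with no parent, so revoking the token that
	// created it does not revoke Nomad's token. The 72h period is an assumption.
	secret, err := client.Auth().Token().CreateOrphan(&vault.TokenCreateRequest{
		Policies: []string{"nomad-server"},
		Period:   "72h",
	})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("nomad server token:", secret.Auth.ClientToken)
}
```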
The token can then be set in the server configuration's
[`vault` stanza][config], as a command-line flag, or via an environment
variable.
```
$ VAULT_TOKEN=f02f01c2-c0d1-7cb7-6b88-8a14fada58c0 nomad agent -config /path/to/config
@@ -289,3 +301,4 @@ but will log the reasons the token is invalid and disable Vault integration.
[template]: /docs/job-specification/template.html "Nomad template Job Specification"
[vault]: https://www.vaultproject.io/ "Vault by HashiCorp"
[vault-spec]: /docs/job-specification/vault.html "Nomad Vault Job Specification"
[tokenhierarchy]: https://www.vaultproject.io/docs/concepts/tokens.html#token-hierarchies-and-orphan-tokens "Vault Tokens - Token Hierarchies and Orphan Tokens"