New update block; still need to handle the upgrade path
api/jobs.go (109 lines changed)
@@ -9,6 +9,7 @@ import (
 	"github.com/gorhill/cronexpr"
 	"github.com/hashicorp/nomad/helper"
+	"github.com/hashicorp/nomad/nomad/structs"
 )

 const (
@@ -247,10 +248,109 @@ type periodicForceResponse struct {
 	EvalID string
 }

-// UpdateStrategy is for serializing update strategy for a job.
+// UpdateStrategy defines a task group's update strategy.
 type UpdateStrategy struct {
-	Stagger     time.Duration
-	MaxParallel int `mapstructure:"max_parallel"`
+	// COMPAT: Remove in 0.7.0. Stagger is deprecated in 0.6.0.
+	Stagger         time.Duration  `mapstructure:"stagger"`
+	MaxParallel     *int           `mapstructure:"max_parallel"`
+	HealthCheck     *string        `mapstructure:"health_check"`
+	MinHealthyTime  *time.Duration `mapstructure:"min_healthy_time"`
+	HealthyDeadline *time.Duration `mapstructure:"healthy_deadline"`
+	AutoRevert      *bool          `mapstructure:"auto_revert"`
+	Canary          *int           `mapstructure:"canary"`
 }

+func (u *UpdateStrategy) Copy() *UpdateStrategy {
+	if u == nil {
+		return nil
+	}
+
+	copy := new(UpdateStrategy)
+
+	// COMPAT: Remove in 0.7.0. Stagger is deprecated in 0.6.0.
+	copy.Stagger = u.Stagger
+
+	if u.MaxParallel != nil {
+		copy.MaxParallel = helper.IntToPtr(*u.MaxParallel)
+	}
+
+	if u.HealthCheck != nil {
+		copy.HealthCheck = helper.StringToPtr(*u.HealthCheck)
+	}
+
+	if u.MinHealthyTime != nil {
+		copy.MinHealthyTime = helper.TimeToPtr(*u.MinHealthyTime)
+	}
+
+	if u.HealthyDeadline != nil {
+		copy.HealthyDeadline = helper.TimeToPtr(*u.HealthyDeadline)
+	}
+
+	if u.AutoRevert != nil {
+		copy.AutoRevert = helper.BoolToPtr(*u.AutoRevert)
+	}
+
+	if u.Canary != nil {
+		copy.Canary = helper.IntToPtr(*u.Canary)
+	}
+
+	return copy
+}
+
+func (u *UpdateStrategy) Merge(o *UpdateStrategy) {
+	if o == nil {
+		return
+	}
+
+	if o.MaxParallel != nil {
+		u.MaxParallel = helper.IntToPtr(*o.MaxParallel)
+	}
+
+	if o.HealthCheck != nil {
+		u.HealthCheck = helper.StringToPtr(*o.HealthCheck)
+	}
+
+	if o.MinHealthyTime != nil {
+		u.MinHealthyTime = helper.TimeToPtr(*o.MinHealthyTime)
+	}
+
+	if o.HealthyDeadline != nil {
+		u.HealthyDeadline = helper.TimeToPtr(*o.HealthyDeadline)
+	}
+
+	if o.AutoRevert != nil {
+		u.AutoRevert = helper.BoolToPtr(*o.AutoRevert)
+	}
+
+	if o.Canary != nil {
+		u.Canary = helper.IntToPtr(*o.Canary)
+	}
+}
+
+func (u *UpdateStrategy) Canonicalize() {
+	if u.MaxParallel == nil {
+		u.MaxParallel = helper.IntToPtr(0)
+	}
+
+	if u.HealthCheck == nil {
+		u.HealthCheck = helper.StringToPtr(structs.UpdateStrategyHealthCheck_Checks)
+	}
+
+	if u.HealthyDeadline == nil {
+		u.HealthyDeadline = helper.TimeToPtr(5 * time.Minute)
+	}
+
+	if u.MinHealthyTime == nil {
+		u.MinHealthyTime = helper.TimeToPtr(10 * time.Second)
+	}
+
+	if u.AutoRevert == nil {
+		u.AutoRevert = helper.BoolToPtr(false)
+	}
+
+	if u.Canary == nil {
+		u.Canary = helper.IntToPtr(0)
+	}
+}
+
 // PeriodicConfig is for serializing periodic config for a job.
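Note: the pointer fields above let the API distinguish "unset" from a zero value, so Canonicalize can fill defaults without clobbering an explicit zero. A minimal sketch of the defaulting, assuming the api package types from this hunk (default values taken from Canonicalize above):

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper"
)

func main() {
	// Only max_parallel is set explicitly; every other field is nil ("unset").
	u := &api.UpdateStrategy{MaxParallel: helper.IntToPtr(3)}

	// Canonicalize fills the nil fields with the defaults from the hunk above:
	// health_check="checks", healthy_deadline=5m, min_healthy_time=10s,
	// auto_revert=false, canary=0.
	u.Canonicalize()

	fmt.Println(*u.MaxParallel, *u.HealthCheck, *u.MinHealthyTime, *u.Canary)
	// Output: 3 checks 10s 0
}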
@@ -399,6 +499,9 @@ func (j *Job) Canonicalize() {
 	if j.Periodic != nil {
 		j.Periodic.Canonicalize()
 	}
+	if j.Update != nil {
+		j.Update.Canonicalize()
+	}

 	for _, tg := range j.TaskGroups {
 		tg.Canonicalize(j)
api/tasks.go (17 lines changed)
@@ -143,6 +143,7 @@ type TaskGroup struct {
 	Tasks         []*Task
 	RestartPolicy *RestartPolicy
 	EphemeralDisk *EphemeralDisk
+	Update        *UpdateStrategy
 	Meta          map[string]string
 }

@@ -170,6 +171,22 @@ func (g *TaskGroup) Canonicalize(job *Job) {
 		g.EphemeralDisk.Canonicalize()
 	}

+	// Merge the update policy from the job
+	if ju, tu := job.Update != nil, g.Update != nil; ju && tu {
+		// Merge the job's and the task group's definitions of the update strategy
+		jc := job.Update.Copy()
+		jc.Merge(g.Update)
+		g.Update = jc
+	} else if ju {
+		// Inherit the job's update strategy
+		jc := job.Update.Copy()
+		g.Update = jc
+	}
+
+	if g.Update != nil {
+		g.Update.Canonicalize()
+	}
+
 	var defaultRestartPolicy *RestartPolicy
 	switch *job.Type {
 	case "service", "system":
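Note: the branch above gives group-level settings precedence over job-level ones: the job's strategy is copied first, then the group's non-nil fields are merged over it. A minimal sketch of that precedence using only Copy and Merge from the previous hunk:

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper"
)

func main() {
	// Job-level strategy: max_parallel=2, canary=1.
	jobUpdate := &api.UpdateStrategy{
		MaxParallel: helper.IntToPtr(2),
		Canary:      helper.IntToPtr(1),
	}
	// Group-level strategy overrides only canary.
	groupUpdate := &api.UpdateStrategy{Canary: helper.IntToPtr(3)}

	// The same sequence TaskGroup.Canonicalize runs above: copy the job's
	// strategy, then overlay the group's non-nil fields.
	effective := jobUpdate.Copy()
	effective.Merge(groupUpdate)

	fmt.Println(*effective.MaxParallel, *effective.Canary) // 2 3
}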
@@ -465,10 +465,13 @@ func ApiJobToStructJob(job *api.Job) *structs.Job {
 		}
 	}

+	// COMPAT: Remove in 0.7.0. Update has been pushed into the task groups
 	if job.Update != nil {
 		j.Update = structs.UpdateStrategy{
-			Stagger:     job.Update.Stagger,
-			MaxParallel: job.Update.MaxParallel,
+			Stagger: job.Update.Stagger,
 		}
+		if job.Update.MaxParallel != nil {
+			j.Update.MaxParallel = *job.Update.MaxParallel
+		}
 	}

@@ -532,6 +535,17 @@ func ApiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) {
 			Migrate: *taskGroup.EphemeralDisk.Migrate,
 		}
 	}

+	if taskGroup.Update != nil {
+		tg.Update = &structs.UpdateStrategy{
+			MaxParallel:     *taskGroup.Update.MaxParallel,
+			HealthCheck:     *taskGroup.Update.HealthCheck,
+			MinHealthyTime:  *taskGroup.Update.MinHealthyTime,
+			HealthyDeadline: *taskGroup.Update.HealthyDeadline,
+			AutoRevert:      *taskGroup.Update.AutoRevert,
+			Canary:          *taskGroup.Update.Canary,
+		}
+	}
+
 	if l := len(taskGroup.Tasks); l != 0 {
 		tg.Tasks = make([]*structs.Task, l)
 		for l, task := range taskGroup.Tasks {
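Note: this conversion dereferences every pointer field unconditionally, so it relies on the group's update block having been canonicalized first; an unset field would panic here. A hedged sketch of a defensive variant (the extra Canonicalize call is hypothetical, not part of this commit):

	if taskGroup.Update != nil {
		// Hypothetical guard: default any unset fields so the dereferences
		// below cannot hit a nil pointer if canonicalization was skipped.
		taskGroup.Update.Canonicalize()

		tg.Update = &structs.UpdateStrategy{
			MaxParallel:     *taskGroup.Update.MaxParallel,
			HealthCheck:     *taskGroup.Update.HealthCheck,
			MinHealthyTime:  *taskGroup.Update.MinHealthyTime,
			HealthyDeadline: *taskGroup.Update.HealthyDeadline,
			AutoRevert:      *taskGroup.Update.AutoRevert,
			Canary:          *taskGroup.Update.Canary,
		}
	}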
@@ -858,8 +858,13 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
 			},
 		},
 		Update: &api.UpdateStrategy{
-			Stagger:     1 * time.Second,
-			MaxParallel: 5,
+			Stagger:         1 * time.Second,
+			MaxParallel:     helper.IntToPtr(5),
+			HealthCheck:     helper.StringToPtr(structs.UpdateStrategyHealthCheck_Manual),
+			MinHealthyTime:  helper.TimeToPtr(1 * time.Minute),
+			HealthyDeadline: helper.TimeToPtr(3 * time.Minute),
+			AutoRevert:      helper.BoolToPtr(false),
+			Canary:          helper.IntToPtr(1),
 		},
 		Periodic: &api.PeriodicConfig{
 			Enabled: helper.BoolToPtr(true),
@@ -899,6 +904,13 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
 					Sticky:  helper.BoolToPtr(true),
 					Migrate: helper.BoolToPtr(true),
 				},
+				Update: &api.UpdateStrategy{
+					HealthCheck:     helper.StringToPtr(structs.UpdateStrategyHealthCheck_Checks),
+					MinHealthyTime:  helper.TimeToPtr(2 * time.Minute),
+					HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
+					AutoRevert:      helper.BoolToPtr(true),
+				},
+
 				Meta: map[string]string{
 					"key": "value",
 				},
@@ -1078,6 +1090,14 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
 					Sticky:  true,
 					Migrate: true,
 				},
+				Update: &structs.UpdateStrategy{
+					MaxParallel:     5,
+					HealthCheck:     structs.UpdateStrategyHealthCheck_Checks,
+					MinHealthyTime:  2 * time.Minute,
+					HealthyDeadline: 5 * time.Minute,
+					AutoRevert:      true,
+					Canary:          1,
+				},
 				Meta: map[string]string{
 					"key": "value",
 				},
@@ -272,6 +272,7 @@ func parseGroups(result *api.Job, list *ast.ObjectList) error {
 			"meta",
 			"task",
 			"ephemeral_disk",
+			"update",
 			"vault",
 		}
 		if err := checkHCLKeys(listVal, valid); err != nil {
@@ -287,6 +288,7 @@ func parseGroups(result *api.Job, list *ast.ObjectList) error {
 		delete(m, "task")
 		delete(m, "restart")
 		delete(m, "ephemeral_disk")
+		delete(m, "update")
 		delete(m, "vault")

 		// Build the group with the basic decode
@@ -318,6 +320,13 @@ func parseGroups(result *api.Job, list *ast.ObjectList) error {
 			}
 		}

+		// If we have an update strategy, then parse that
+		if o := listVal.Filter("update"); len(o.Items) > 0 {
+			if err := parseUpdate(&g.Update, o); err != nil {
+				return multierror.Prefix(err, "update ->")
+			}
+		}
+
 		// Parse out meta fields. These are in HCL as a list so we need
 		// to iterate over them and merge them.
 		if metaO := listVal.Filter("meta"); len(metaO.Items) > 0 {
@@ -1104,7 +1113,7 @@ func parsePorts(networkObj *ast.ObjectList, nw *api.NetworkResource) error {
 func parseUpdate(result **api.UpdateStrategy, list *ast.ObjectList) error {
 	list = list.Elem()
 	if len(list.Items) > 1 {
-		return fmt.Errorf("only one 'update' block allowed per job")
+		return fmt.Errorf("only one 'update' block allowed")
 	}

 	// Get our resource object
@@ -1117,8 +1126,14 @@ func parseUpdate(result **api.UpdateStrategy, list *ast.ObjectList) error {

 	// Check for invalid keys
 	valid := []string{
+		// COMPAT: Remove in 0.7.0. Stagger is deprecated in 0.6.0.
 		"stagger",
 		"max_parallel",
+		"health_check",
+		"min_healthy_time",
+		"healthy_deadline",
+		"auto_revert",
+		"canary",
 	}
 	if err := checkHCLKeys(o.Val, valid); err != nil {
 		return err
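Note: with "update" accepted at both levels, a job file can set a job-wide strategy and override it per group. A minimal parse sketch, assuming jobspec.Parse takes an io.Reader and returns the *api.Job built by parseGroups/parseUpdate above:

package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/nomad/jobspec"
)

func main() {
	hcl := `
job "example" {
  update {
    max_parallel = 2
  }

  group "web" {
    update {
      canary = 1
    }

    task "app" {
      driver = "docker"
    }
  }
}`

	job, err := jobspec.Parse(strings.NewReader(hcl))
	if err != nil {
		panic(err)
	}

	// Job-level and group-level blocks both decode into api.UpdateStrategy.
	fmt.Println(*job.Update.MaxParallel, *job.TaskGroups[0].Update.Canary) // 2 1
}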
@@ -46,8 +46,13 @@ func TestParse(t *testing.T) {
 			},

 			Update: &api.UpdateStrategy{
-				Stagger:     60 * time.Second,
-				MaxParallel: 2,
+				Stagger:         60 * time.Second,
+				MaxParallel:     helper.IntToPtr(2),
+				HealthCheck:     helper.StringToPtr("manual"),
+				MinHealthyTime:  helper.TimeToPtr(10 * time.Second),
+				HealthyDeadline: helper.TimeToPtr(10 * time.Minute),
+				AutoRevert:      helper.BoolToPtr(true),
+				Canary:          helper.IntToPtr(1),
 			},

 			TaskGroups: []*api.TaskGroup{
@@ -92,6 +97,14 @@ func TestParse(t *testing.T) {
 						Sticky: helper.BoolToPtr(true),
 						SizeMB: helper.IntToPtr(150),
 					},
+					Update: &api.UpdateStrategy{
+						MaxParallel:     helper.IntToPtr(3),
+						HealthCheck:     helper.StringToPtr("checks"),
+						MinHealthyTime:  helper.TimeToPtr(1 * time.Second),
+						HealthyDeadline: helper.TimeToPtr(1 * time.Minute),
+						AutoRevert:      helper.BoolToPtr(false),
+						Canary:          helper.IntToPtr(2),
+					},
 					Tasks: []*api.Task{
 						&api.Task{
 							Name: "binstore",
@@ -18,6 +18,11 @@ job "binstore-storagelocker" {
   update {
     stagger      = "60s"
     max_parallel = 2
+    health_check     = "manual"
+    min_healthy_time = "10s"
+    healthy_deadline = "10m"
+    auto_revert      = true
+    canary           = 1
   }

   task "outside" {
@@ -47,6 +52,15 @@ job "binstore-storagelocker" {
       size = 150
     }

+    update {
+      max_parallel     = 3
+      health_check     = "checks"
+      min_healthy_time = "1s"
+      healthy_deadline = "1m"
+      auto_revert      = false
+      canary           = 2
+    }
+
     task "binstore" {
       driver = "docker"
       user   = "bob"
@@ -57,25 +57,22 @@ type JobDiff struct {
 // diffable. If contextual diff is enabled, objects within the job will contain
 // field information even if unchanged.
 func (j *Job) Diff(other *Job, contextual bool) (*JobDiff, error) {
+	// COMPAT: Remove "Update" in 0.7.0. Update pushed down to task groups
+	// in 0.6.0
 	diff := &JobDiff{Type: DiffTypeNone}
 	var oldPrimitiveFlat, newPrimitiveFlat map[string]string
 	filter := []string{"ID", "Status", "StatusDescription", "Version", "Stable", "CreateIndex",
-		"ModifyIndex", "JobModifyIndex"}
-
-	// Have to treat this special since it is a struct literal, not a pointer
-	var jUpdate, otherUpdate *UpdateStrategy
+		"ModifyIndex", "JobModifyIndex", "Update"}

 	if j == nil && other == nil {
 		return diff, nil
 	} else if j == nil {
 		j = &Job{}
-		otherUpdate = &other.Update
 		diff.Type = DiffTypeAdded
 		newPrimitiveFlat = flatmap.Flatten(other, filter, true)
 		diff.ID = other.ID
 	} else if other == nil {
 		other = &Job{}
-		jUpdate = &j.Update
 		diff.Type = DiffTypeDeleted
 		oldPrimitiveFlat = flatmap.Flatten(j, filter, true)
 		diff.ID = j.ID
@@ -84,8 +81,6 @@ func (j *Job) Diff(other *Job, contextual bool) (*JobDiff, error) {
 		return nil, fmt.Errorf("can not diff jobs with different IDs: %q and %q", j.ID, other.ID)
 	}

-	jUpdate = &j.Update
-	otherUpdate = &other.Update
 	oldPrimitiveFlat = flatmap.Flatten(j, filter, true)
 	newPrimitiveFlat = flatmap.Flatten(other, filter, true)
 	diff.ID = other.ID
@@ -117,11 +112,6 @@ func (j *Job) Diff(other *Job, contextual bool) (*JobDiff, error) {
 	}
 	diff.TaskGroups = tgs

-	// Update diff
-	if uDiff := primitiveObjectDiff(jUpdate, otherUpdate, nil, "Update", contextual); uDiff != nil {
-		diff.Objects = append(diff.Objects, uDiff)
-	}
-
 	// Periodic diff
 	if pDiff := primitiveObjectDiff(j.Periodic, other.Periodic, nil, "Periodic", contextual); pDiff != nil {
 		diff.Objects = append(diff.Objects, pDiff)
@@ -250,6 +240,12 @@ func (tg *TaskGroup) Diff(other *TaskGroup, contextual bool) (*TaskGroupDiff, er
 		diff.Objects = append(diff.Objects, diskDiff)
 	}

+	// Update diff
+	// COMPAT: Remove "Stagger" in 0.7.0.
+	if uDiff := primitiveObjectDiff(tg.Update, other.Update, []string{"Stagger"}, "Update", contextual); uDiff != nil {
+		diff.Objects = append(diff.Objects, uDiff)
+	}
+
 	// Tasks diff
 	tasks, err := taskDiffs(tg.Tasks, other.Tasks, contextual)
 	if err != nil {
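Note: passing "Stagger" in the filter means a change that only touches the deprecated field produces no Update diff for the task group. A sketch, assuming the structs types introduced later in this commit:

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/nomad/nomad/structs"
)

func main() {
	oldTG := &structs.TaskGroup{
		Name:   "web",
		Update: &structs.UpdateStrategy{MaxParallel: 1, Stagger: 10 * time.Second},
	}
	newTG := &structs.TaskGroup{
		Name:   "web",
		Update: &structs.UpdateStrategy{MaxParallel: 2, Stagger: 30 * time.Second},
	}

	// Stagger is filtered out above, so only MaxParallel is reported.
	d, err := oldTG.Diff(newTG, false)
	if err != nil {
		panic(err)
	}
	for _, o := range d.Objects {
		fmt.Println(o.Type, o.Name) // Edited Update (one MaxParallel field diff)
	}
}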
@@ -187,26 +187,6 @@ func TestJobDiff(t *testing.T) {
 					New:  "",
 				},
 			},
-			Objects: []*ObjectDiff{
-				{
-					Type: DiffTypeDeleted,
-					Name: "Update",
-					Fields: []*FieldDiff{
-						{
-							Type: DiffTypeDeleted,
-							Name: "MaxParallel",
-							Old:  "0",
-							New:  "",
-						},
-						{
-							Type: DiffTypeDeleted,
-							Name: "Stagger",
-							Old:  "0",
-							New:  "",
-						},
-					},
-				},
-			},
 		},
 	},
 	{
@@ -270,26 +250,6 @@ func TestJobDiff(t *testing.T) {
 					New:  "batch",
 				},
 			},
-			Objects: []*ObjectDiff{
-				{
-					Type: DiffTypeAdded,
-					Name: "Update",
-					Fields: []*FieldDiff{
-						{
-							Type: DiffTypeAdded,
-							Name: "MaxParallel",
-							Old:  "",
-							New:  "0",
-						},
-						{
-							Type: DiffTypeAdded,
-							Name: "Stagger",
-							Old:  "",
-							New:  "0",
-						},
-					},
-				},
-			},
 		},
 	},
 	{
@@ -460,83 +420,6 @@ func TestJobDiff(t *testing.T) {
 			},
 		},
 	},
-	{
-		// Update strategy edited
-		Old: &Job{
-			Update: UpdateStrategy{
-				Stagger:     10 * time.Second,
-				MaxParallel: 5,
-			},
-		},
-		New: &Job{
-			Update: UpdateStrategy{
-				Stagger:     60 * time.Second,
-				MaxParallel: 10,
-			},
-		},
-		Expected: &JobDiff{
-			Type: DiffTypeEdited,
-			Objects: []*ObjectDiff{
-				{
-					Type: DiffTypeEdited,
-					Name: "Update",
-					Fields: []*FieldDiff{
-						{
-							Type: DiffTypeEdited,
-							Name: "MaxParallel",
-							Old:  "5",
-							New:  "10",
-						},
-						{
-							Type: DiffTypeEdited,
-							Name: "Stagger",
-							Old:  "10000000000",
-							New:  "60000000000",
-						},
-					},
-				},
-			},
-		},
-	},
-	{
-		// Update strategy edited with context
-		Contextual: true,
-		Old: &Job{
-			Update: UpdateStrategy{
-				Stagger:     10 * time.Second,
-				MaxParallel: 5,
-			},
-		},
-		New: &Job{
-			Update: UpdateStrategy{
-				Stagger:     60 * time.Second,
-				MaxParallel: 5,
-			},
-		},
-		Expected: &JobDiff{
-			Type: DiffTypeEdited,
-			Objects: []*ObjectDiff{
-				{
-					Type: DiffTypeEdited,
-					Name: "Update",
-					Fields: []*FieldDiff{
-						{
-							Type: DiffTypeNone,
-							Name: "MaxParallel",
-							Old:  "5",
-							New:  "5",
-						},
-						{
-							Type: DiffTypeEdited,
-							Name: "Stagger",
-							Old:  "10000000000",
-							New:  "60000000000",
-						},
-					},
-				},
-			},
-		},
-	},
 	{
 		// Periodic added
 		Old: &Job{},
@@ -1611,6 +1494,247 @@ func TestTaskGroupDiff(t *testing.T) {
 				},
 			},
 		},
+		{
+			// Update strategy deleted
+			Old: &TaskGroup{
+				Update: &UpdateStrategy{
+					AutoRevert: true,
+				},
+			},
+			New: &TaskGroup{},
+			Expected: &TaskGroupDiff{
+				Type: DiffTypeEdited,
+				Objects: []*ObjectDiff{
+					{
+						Type: DiffTypeDeleted,
+						Name: "Update",
+						Fields: []*FieldDiff{
+							{
+								Type: DiffTypeDeleted,
+								Name: "AutoRevert",
+								Old:  "true",
+								New:  "",
+							},
+							{
+								Type: DiffTypeDeleted,
+								Name: "Canary",
+								Old:  "0",
+								New:  "",
+							},
+							{
+								Type: DiffTypeDeleted,
+								Name: "HealthyDeadline",
+								Old:  "0",
+								New:  "",
+							},
+							{
+								Type: DiffTypeDeleted,
+								Name: "MaxParallel",
+								Old:  "0",
+								New:  "",
+							},
+							{
+								Type: DiffTypeDeleted,
+								Name: "MinHealthyTime",
+								Old:  "0",
+								New:  "",
+							},
+						},
+					},
+				},
+			},
+		},
+		{
+			// Update strategy added
+			Old: &TaskGroup{},
+			New: &TaskGroup{
+				Update: &UpdateStrategy{
+					AutoRevert: true,
+				},
+			},
+			Expected: &TaskGroupDiff{
+				Type: DiffTypeEdited,
+				Objects: []*ObjectDiff{
+					{
+						Type: DiffTypeAdded,
+						Name: "Update",
+						Fields: []*FieldDiff{
+							{
+								Type: DiffTypeAdded,
+								Name: "AutoRevert",
+								Old:  "",
+								New:  "true",
+							},
+							{
+								Type: DiffTypeAdded,
+								Name: "Canary",
+								Old:  "",
+								New:  "0",
+							},
+							{
+								Type: DiffTypeAdded,
+								Name: "HealthyDeadline",
+								Old:  "",
+								New:  "0",
+							},
+							{
+								Type: DiffTypeAdded,
+								Name: "MaxParallel",
+								Old:  "",
+								New:  "0",
+							},
+							{
+								Type: DiffTypeAdded,
+								Name: "MinHealthyTime",
+								Old:  "",
+								New:  "0",
+							},
+						},
+					},
+				},
+			},
+		},
+		{
+			// Update strategy edited
+			Old: &TaskGroup{
+				Update: &UpdateStrategy{
+					MaxParallel:     5,
+					HealthCheck:     "foo",
+					MinHealthyTime:  1 * time.Second,
+					HealthyDeadline: 30 * time.Second,
+					AutoRevert:      true,
+					Canary:          2,
+				},
+			},
+			New: &TaskGroup{
+				Update: &UpdateStrategy{
+					MaxParallel:     7,
+					HealthCheck:     "bar",
+					MinHealthyTime:  2 * time.Second,
+					HealthyDeadline: 31 * time.Second,
+					AutoRevert:      false,
+					Canary:          1,
+				},
+			},
+			Expected: &TaskGroupDiff{
+				Type: DiffTypeEdited,
+				Objects: []*ObjectDiff{
+					{
+						Type: DiffTypeEdited,
+						Name: "Update",
+						Fields: []*FieldDiff{
+							{
+								Type: DiffTypeEdited,
+								Name: "AutoRevert",
+								Old:  "true",
+								New:  "false",
+							},
+							{
+								Type: DiffTypeEdited,
+								Name: "Canary",
+								Old:  "2",
+								New:  "1",
+							},
+							{
+								Type: DiffTypeEdited,
+								Name: "HealthCheck",
+								Old:  "foo",
+								New:  "bar",
+							},
+							{
+								Type: DiffTypeEdited,
+								Name: "HealthyDeadline",
+								Old:  "30000000000",
+								New:  "31000000000",
+							},
+							{
+								Type: DiffTypeEdited,
+								Name: "MaxParallel",
+								Old:  "5",
+								New:  "7",
+							},
+							{
+								Type: DiffTypeEdited,
+								Name: "MinHealthyTime",
+								Old:  "1000000000",
+								New:  "2000000000",
+							},
+						},
+					},
+				},
+			},
+		},
+		{
+			// Update strategy edited with context
+			Contextual: true,
+			Old: &TaskGroup{
+				Update: &UpdateStrategy{
+					MaxParallel:     5,
+					HealthCheck:     "foo",
+					MinHealthyTime:  1 * time.Second,
+					HealthyDeadline: 30 * time.Second,
+					AutoRevert:      true,
+					Canary:          2,
+				},
+			},
+			New: &TaskGroup{
+				Update: &UpdateStrategy{
+					MaxParallel:     7,
+					HealthCheck:     "foo",
+					MinHealthyTime:  1 * time.Second,
+					HealthyDeadline: 30 * time.Second,
+					AutoRevert:      true,
+					Canary:          2,
+				},
+			},
+			Expected: &TaskGroupDiff{
+				Type: DiffTypeEdited,
+				Objects: []*ObjectDiff{
+					{
+						Type: DiffTypeEdited,
+						Name: "Update",
+						Fields: []*FieldDiff{
+							{
+								Type: DiffTypeNone,
+								Name: "AutoRevert",
+								Old:  "true",
+								New:  "true",
+							},
+							{
+								Type: DiffTypeNone,
+								Name: "Canary",
+								Old:  "2",
+								New:  "2",
+							},
+							{
+								Type: DiffTypeNone,
+								Name: "HealthCheck",
+								Old:  "foo",
+								New:  "foo",
+							},
+							{
+								Type: DiffTypeNone,
+								Name: "HealthyDeadline",
+								Old:  "30000000000",
+								New:  "30000000000",
+							},
+							{
+								Type: DiffTypeEdited,
+								Name: "MaxParallel",
+								Old:  "5",
+								New:  "7",
+							},
+							{
+								Type: DiffTypeNone,
+								Name: "MinHealthyTime",
+								Old:  "1000000000",
+								New:  "1000000000",
+							},
+						},
+					},
+				},
+			},
+		},
 		{
 			// EphemeralDisk added
 			Old: &TaskGroup{},
@@ -1181,7 +1181,7 @@ type Job struct {
 	// to run. Each task group is an atomic unit of scheduling and placement.
 	TaskGroups []*TaskGroup

-	// Update is used to control the update strategy
+	// COMPAT: Remove in 0.7.0. Stagger is deprecated in 0.6.0.
 	Update UpdateStrategy

 	// Periodic is used to define the interval the job is run at.
@@ -1586,15 +1586,92 @@ type TaskGroupSummary struct {
 	Lost     int
 }

+const (
+	// Checks uses any registered health check state in combination with task
+	// states to determine if an allocation is healthy.
+	UpdateStrategyHealthCheck_Checks = "checks"
+
+	// TaskStates uses the task states of an allocation to determine if the
+	// allocation is healthy.
+	UpdateStrategyHealthCheck_TaskStates = "task_states"
+
+	// Manual allows the operator to manually signal to Nomad when an
+	// allocation is healthy. This allows more advanced health checking that is
+	// outside of the scope of Nomad.
+	UpdateStrategyHealthCheck_Manual = "manual"
+)
+
 // UpdateStrategy is used to modify how updates are done
 type UpdateStrategy struct {
 	// Stagger is the amount of time between the updates
+	// COMPAT: Remove in 0.7.0. Stagger is deprecated in 0.6.0.
 	Stagger time.Duration

 	// MaxParallel is how many updates can be done in parallel
 	MaxParallel int
+
+	// HealthCheck specifies the mechanism in which allocations are marked
+	// healthy or unhealthy as part of a deployment.
+	HealthCheck string
+
+	// MinHealthyTime is the minimum time an allocation must be in the healthy
+	// state before it is marked as healthy, unblocking more allocations to be
+	// rolled.
+	MinHealthyTime time.Duration
+
+	// HealthyDeadline is the time in which an allocation must be marked as
+	// healthy before it is automatically transitioned to unhealthy. This time
+	// period doesn't count against the MinHealthyTime.
+	HealthyDeadline time.Duration
+
+	// AutoRevert declares that if a deployment fails because of unhealthy
+	// allocations, there should be an attempt to auto-revert the job to a
+	// stable version.
+	AutoRevert bool
+
+	// Canary is the number of canaries to deploy when a change to the task
+	// group is detected.
+	Canary int
 }

+func (u *UpdateStrategy) Copy() *UpdateStrategy {
+	if u == nil {
+		return nil
+	}
+
+	copy := new(UpdateStrategy)
+	*copy = *u
+	return copy
+}
+
+func (u *UpdateStrategy) Validate() error {
+	if u == nil {
+		return nil
+	}
+
+	var mErr multierror.Error
+	switch u.HealthCheck {
+	case UpdateStrategyHealthCheck_Checks, UpdateStrategyHealthCheck_TaskStates, UpdateStrategyHealthCheck_Manual:
+	default:
+		multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck))
+	}
+
+	if u.MaxParallel < 0 {
+		multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than zero: %d < 0", u.MaxParallel))
+	}
+	if u.Canary < 0 {
+		multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary))
+	}
+	if u.MinHealthyTime < 0 {
+		multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime))
+	}
+	if u.HealthyDeadline <= 0 {
+		multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline))
+	}
+
+	return mErr.ErrorOrNil()
+}
+
+// TODO(alexdadgar): Remove once no longer used by the scheduler.
 // Rolling returns if a rolling strategy should be used
 func (u *UpdateStrategy) Rolling() bool {
 	return u.Stagger > 0 && u.MaxParallel > 0
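Note: Copy can be a plain value copy here because every field of the struct is a scalar; nothing aliases. Validate, meanwhile, collects every violation via go-multierror instead of failing fast. A small usage sketch, assuming the types above:

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/nomad/structs"
)

func main() {
	u := &structs.UpdateStrategy{
		HealthCheck:     "nope", // not checks/task_states/manual
		MaxParallel:     -1,     // negative: rejected
		HealthyDeadline: 0,      // must be strictly positive
	}

	// All three violations come back in a single multierror.
	if err := u.Validate(); err != nil {
		fmt.Println(err)
	}
}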
@@ -1942,6 +2019,9 @@ type TaskGroup struct {
 	// be scheduled.
 	Count int

+	// Update is used to control the update strategy for this task group
+	Update *UpdateStrategy
+
 	// Constraints can be specified at a task group level and apply to
 	// all the tasks contained.
 	Constraints []*Constraint
@@ -1966,8 +2046,8 @@ func (tg *TaskGroup) Copy() *TaskGroup {
 	}
 	ntg := new(TaskGroup)
 	*ntg = *tg
+	ntg.Update = ntg.Update.Copy()
 	ntg.Constraints = CopySliceConstraints(ntg.Constraints)
-
 	ntg.RestartPolicy = ntg.RestartPolicy.Copy()

 	if tg.Tasks != nil {
@@ -2057,6 +2137,23 @@ func (tg *TaskGroup) Validate() error {
 		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name))
 	}

+	// Validate the update strategy
+	if u := tg.Update; u != nil {
+		if err := u.Validate(); err != nil {
+			mErr.Errors = append(mErr.Errors, err)
+		}
+
+		// Validate the counts are appropriate
+		if u.MaxParallel > tg.Count {
+			mErr.Errors = append(mErr.Errors,
+				fmt.Errorf("Update max parallel count is greater than task group count: %d > %d", u.MaxParallel, tg.Count))
+		}
+		if u.Canary > tg.Count {
+			mErr.Errors = append(mErr.Errors,
+				fmt.Errorf("Update canary count is greater than task group count: %d > %d", u.Canary, tg.Count))
+		}
+	}
+
 	// Check for duplicate tasks and that there is only leader task if any
 	tasks := make(map[string]int)
 	leaderTasks := 0
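Note: the count checks tie the strategy to the group's size: you cannot update or canary more allocations than the group runs. A sketch, assuming the structs types above (Validate will also report the group's other, unrelated problems, such as missing tasks):

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/nomad/nomad/structs"
)

func main() {
	tg := &structs.TaskGroup{
		Name:  "web",
		Count: 2,
		Update: &structs.UpdateStrategy{
			MaxParallel:     3, // greater than Count: rejected
			HealthCheck:     structs.UpdateStrategyHealthCheck_Checks,
			MinHealthyTime:  10 * time.Second,
			HealthyDeadline: 5 * time.Minute,
		},
	}

	// Among the returned errors: "Update max parallel count is greater
	// than task group count: 3 > 2".
	fmt.Println(tg.Validate())
}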
@@ -429,6 +429,14 @@ func TestTaskGroup_Validate(t *testing.T) {
 			Attempts: 10,
 			Mode:     RestartPolicyModeDelay,
 		},
+		Update: &UpdateStrategy{
+			MaxParallel:     3,
+			HealthCheck:     UpdateStrategyHealthCheck_Manual,
+			MinHealthyTime:  1 * time.Second,
+			HealthyDeadline: 1 * time.Second,
+			AutoRevert:      false,
+			Canary:          3,
+		},
 	}

 	err = tg.Validate()
@@ -436,16 +444,22 @@ func TestTaskGroup_Validate(t *testing.T) {
 	if !strings.Contains(mErr.Errors[0].Error(), "should have an ephemeral disk object") {
 		t.Fatalf("err: %s", err)
 	}
-	if !strings.Contains(mErr.Errors[1].Error(), "2 redefines 'web' from task 1") {
+	if !strings.Contains(mErr.Errors[1].Error(), "max parallel count is greater") {
 		t.Fatalf("err: %s", err)
 	}
-	if !strings.Contains(mErr.Errors[2].Error(), "Task 3 missing name") {
+	if !strings.Contains(mErr.Errors[2].Error(), "canary count is greater") {
 		t.Fatalf("err: %s", err)
 	}
-	if !strings.Contains(mErr.Errors[3].Error(), "Only one task may be marked as leader") {
+	if !strings.Contains(mErr.Errors[3].Error(), "2 redefines 'web' from task 1") {
 		t.Fatalf("err: %s", err)
 	}
-	if !strings.Contains(mErr.Errors[4].Error(), "Task web validation failed") {
+	if !strings.Contains(mErr.Errors[4].Error(), "Task 3 missing name") {
 		t.Fatalf("err: %s", err)
 	}
+	if !strings.Contains(mErr.Errors[5].Error(), "Only one task may be marked as leader") {
+		t.Fatalf("err: %s", err)
+	}
+	if !strings.Contains(mErr.Errors[6].Error(), "Task web validation failed") {
+		t.Fatalf("err: %s", err)
+	}
 }
@@ -833,6 +847,35 @@ func TestConstraint_Validate(t *testing.T) {
 	}
 }

+func TestUpdateStrategy_Validate(t *testing.T) {
+	u := &UpdateStrategy{
+		MaxParallel:     -1,
+		HealthCheck:     "foo",
+		MinHealthyTime:  -10,
+		HealthyDeadline: -10,
+		AutoRevert:      false,
+		Canary:          -1,
+	}
+
+	err := u.Validate()
+	mErr := err.(*multierror.Error)
+	if !strings.Contains(mErr.Errors[0].Error(), "Invalid health check given") {
+		t.Fatalf("err: %s", err)
+	}
+	if !strings.Contains(mErr.Errors[1].Error(), "Max parallel can not be less than zero") {
+		t.Fatalf("err: %s", err)
+	}
+	if !strings.Contains(mErr.Errors[2].Error(), "Canary count can not be less than zero") {
+		t.Fatalf("err: %s", err)
+	}
+	if !strings.Contains(mErr.Errors[3].Error(), "Minimum healthy time may not be less than zero") {
+		t.Fatalf("err: %s", err)
+	}
+	if !strings.Contains(mErr.Errors[4].Error(), "Healthy deadline must be greater than zero") {
+		t.Fatalf("err: %s", err)
+	}
+}
+
 func TestResource_NetIndex(t *testing.T) {
 	r := &Resources{
 		Networks: []*NetworkResource{