diff --git a/jobspec/parse.go b/jobspec/parse.go
index a7ef01799..18d1a4ff3 100644
--- a/jobspec/parse.go
+++ b/jobspec/parse.go
@@ -257,7 +257,7 @@ func parseConstraints(result *[]*structs.Constraint, obj *hclobj.Object) error {
 			m["RTarget"] = constraint
 		}
 
-		if value, ok := m["unique"]; ok {
+		if value, ok := m["distinctHosts"]; ok {
 			enabled, err := strconv.ParseBool(value.(string))
 			if err != nil {
 				return err
@@ -268,7 +268,7 @@ func parseConstraints(result *[]*structs.Constraint, obj *hclobj.Object) error {
 			continue
 		}
 
-		m["Operand"] = "unique"
+		m["Operand"] = "distinctHosts"
 	}
 
 	// Build the constraint
diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go
index 979ef6020..07cb7c914 100644
--- a/jobspec/parse_test.go
+++ b/jobspec/parse_test.go
@@ -193,7 +193,7 @@ func TestParse(t *testing.T) {
 		},
 
 		{
-			"unique-constraint.hcl",
+			"distinctHosts-constraint.hcl",
 			&structs.Job{
 				ID:   "foo",
 				Name: "foo",
@@ -203,7 +203,7 @@ func TestParse(t *testing.T) {
 				Constraints: []*structs.Constraint{
 					&structs.Constraint{
 						Hard:    true,
-						Operand: "unique",
+						Operand: "distinctHosts",
 					},
 				},
 			},
diff --git a/jobspec/test-fixtures/unique-constraint.hcl b/jobspec/test-fixtures/unique-constraint.hcl
deleted file mode 100644
index 47c31d2d9..000000000
--- a/jobspec/test-fixtures/unique-constraint.hcl
+++ /dev/null
@@ -1,5 +0,0 @@
-job "foo" {
-    constraint {
-        unique = "true"
-    }
-}
diff --git a/scheduler/feasible.go b/scheduler/feasible.go
index 37c066faa..977fa034c 100644
--- a/scheduler/feasible.go
+++ b/scheduler/feasible.go
@@ -152,7 +152,7 @@ func (iter *DriverIterator) hasDrivers(option *structs.Node) bool {
 
 // DynamicConstraintIterator is a FeasibleIterator which returns nodes that
 // match constraints that are not static such as Node attributes but are
-// effected by alloc placements. Examples are unique and tenancy constraints.
+// affected by alloc placements. Examples are distinctHosts and tenancy constraints.
 // This is used to filter on job and task group constraints.
type DynamicConstraintIterator struct {
 	ctx    Context
@@ -160,10 +160,10 @@ type DynamicConstraintIterator struct {
 	tg     *structs.TaskGroup
 	job    *structs.Job
 
-	// Store whether the Job or TaskGroup has unique constraints so they don't
-	// have to be calculated every time Next() is called.
-	tgUnique  bool
-	jobUnique bool
+	// Store whether the Job or TaskGroup has a distinctHosts constraint so
+	// they don't have to be calculated every time Next() is called.
+	tgDistinctHosts  bool
+	jobDistinctHosts bool
 }
 
 // NewDynamicConstraintIterator creates a DynamicConstraintIterator from a
@@ -178,21 +178,21 @@ func NewDynamicConstraintIterator(ctx Context, source FeasibleIterator) *Dynamic
 
 func (iter *DynamicConstraintIterator) SetTaskGroup(tg *structs.TaskGroup) {
 	iter.tg = tg
-	iter.tgUnique = iter.hasUniqueConstraint(tg.Constraints)
+	iter.tgDistinctHosts = iter.hasDistinctHostsConstraint(tg.Constraints)
 }
 
 func (iter *DynamicConstraintIterator) SetJob(job *structs.Job) {
 	iter.job = job
-	iter.jobUnique = iter.hasUniqueConstraint(job.Constraints)
+	iter.jobDistinctHosts = iter.hasDistinctHostsConstraint(job.Constraints)
 }
 
-func (iter *DynamicConstraintIterator) hasUniqueConstraint(constraints []*structs.Constraint) bool {
+func (iter *DynamicConstraintIterator) hasDistinctHostsConstraint(constraints []*structs.Constraint) bool {
 	if constraints == nil {
 		return false
 	}
 
 	for _, con := range constraints {
-		if con.Operand == "unique" {
+		if con.Operand == "distinctHosts" {
 			return true
 		}
 	}
@@ -214,13 +214,13 @@ func (iter *DynamicConstraintIterator) Next() *structs.Node {
 		// Get the next option from the source
 		option := iter.source.Next()
 
-		// Hot-path if the option is nil or there are no unique constraints.
-		if option == nil || (!iter.jobUnique && !iter.tgUnique) {
+		// Hot-path if the option is nil or there are no distinctHosts constraints.
+		if option == nil || (!iter.jobDistinctHosts && !iter.tgDistinctHosts) {
 			return option
 		}
 
-		if !iter.satisfiesUnique(option, iter.jobUnique) {
-			iter.ctx.Metrics().FilterNode(option, "unique")
+		if !iter.satisfiesDistinctHosts(option, iter.jobDistinctHosts) {
+			iter.ctx.Metrics().FilterNode(option, "distinctHosts")
 			continue
 		}
 
@@ -228,9 +228,9 @@ func (iter *DynamicConstraintIterator) Next() *structs.Node {
 	}
 }
 
-// satisfiesUnique checks if the node satisfies a unique constraint either
-// specified at the job level or the TaskGroup level.
-func (iter *DynamicConstraintIterator) satisfiesUnique(option *structs.Node, job bool) bool {
+// satisfiesDistinctHosts checks if the node satisfies a distinctHosts
+// constraint either specified at the job level or the TaskGroup level.
+func (iter *DynamicConstraintIterator) satisfiesDistinctHosts(option *structs.Node, job bool) bool {
 	// Get the proposed allocations
 	proposed, err := iter.ctx.ProposedAllocs(option.ID)
 	if err != nil {
@@ -244,9 +244,9 @@ func (iter *DynamicConstraintIterator) satisfiesUnique(option *structs.Node, job
 		jobCollision := alloc.JobID == iter.job.ID
 		taskCollision := alloc.TaskGroup == iter.tg.Name
 
-		// If the job has a unique constraint we only need an alloc collision on
-		// the JobID but if the constraint is on the TaskGroup then we need both
-		// a job and TaskGroup collision.
+		// If the job has a distinctHosts constraint we only need an alloc
+		// collision on the JobID but if the constraint is on the TaskGroup then
+		// we need both a job and TaskGroup collision.
 		jobInvalid := job && jobCollision
 		tgInvalid := !job && jobCollision && taskCollision
 		if jobInvalid || tgInvalid {
@@ -370,7 +370,7 @@ func resolveConstraintTarget(target string, node *structs.Node) (interface{}, bo
 func checkConstraint(ctx Context, operand string, lVal, rVal interface{}) bool {
 	// Check for constraints not handled by this iterator.
 	switch operand {
-	case "unique":
+	case "distinctHosts":
 		return true
 	default:
 		break
diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go
index b7871b8d6..74eb99f03 100644
--- a/scheduler/feasible_test.go
+++ b/scheduler/feasible_test.go
@@ -382,7 +382,7 @@ func TestCheckRegexpConstraint(t *testing.T) {
 	}
 }
 
-func TestDynamicConstraint_JobUnique_Feasible(t *testing.T) {
+func TestDynamicConstraint_JobDistinctHosts(t *testing.T) {
 	_, ctx := testContext(t)
 	nodes := []*structs.Node{
 		mock.Node(),
@@ -392,13 +392,13 @@ func TestDynamicConstraint_JobUnique_Feasible(t *testing.T) {
 	}
 	static := NewStaticIterator(ctx, nodes)
 
-	// Create a job with a unique constraint and two task groups.
+	// Create a job with a distinctHosts constraint and two task groups.
 	tg1 := &structs.TaskGroup{Name: "bar"}
 	tg2 := &structs.TaskGroup{Name: "baz"}
 	job := &structs.Job{
 		ID:          "foo",
-		Constraints: []*structs.Constraint{{Operand: "unique"}},
+		Constraints: []*structs.Constraint{{Operand: "distinctHosts"}},
 		TaskGroups:  []*structs.TaskGroup{tg1, tg2},
 	}
 
@@ -420,7 +420,7 @@ func TestDynamicConstraint_JobUnique_Feasible(t *testing.T) {
 	}
 }
 
-func TestDynamicConstraint_JobUnique_Infeasible(t *testing.T) {
+func TestDynamicConstraint_JobDistinctHosts_Infeasible(t *testing.T) {
 	_, ctx := testContext(t)
 	nodes := []*structs.Node{
 		mock.Node(),
@@ -428,13 +428,13 @@ func TestDynamicConstraint_JobUnique_Infeasible(t *testing.T) {
 	}
 	static := NewStaticIterator(ctx, nodes)
 
-	// Create a job with a unique constraint and two task groups.
+	// Create a job with a distinctHosts constraint and two task groups.
 	tg1 := &structs.TaskGroup{Name: "bar"}
 	tg2 := &structs.TaskGroup{Name: "baz"}
 	job := &structs.Job{
 		ID:          "foo",
-		Constraints: []*structs.Constraint{{Operand: "unique"}},
+		Constraints: []*structs.Constraint{{Operand: "distinctHosts"}},
 		TaskGroups:  []*structs.TaskGroup{tg1, tg2},
 	}
 
@@ -476,7 +476,7 @@ func TestDynamicConstraint_JobUnique_Infeasible(t *testing.T) {
 	}
 }
 
-func TestDynamicConstraint_JobUnique_InfeasibleCount(t *testing.T) {
+func TestDynamicConstraint_JobDistinctHosts_InfeasibleCount(t *testing.T) {
 	_, ctx := testContext(t)
 	nodes := []*structs.Node{
 		mock.Node(),
@@ -484,14 +484,14 @@ func TestDynamicConstraint_JobUnique_InfeasibleCount(t *testing.T) {
 	}
 	static := NewStaticIterator(ctx, nodes)
 
-	// Create a job with a unique constraint and three task groups.
+	// Create a job with a distinctHosts constraint and three task groups.
 	tg1 := &structs.TaskGroup{Name: "bar"}
 	tg2 := &structs.TaskGroup{Name: "baz"}
 	tg3 := &structs.TaskGroup{Name: "bam"}
 	job := &structs.Job{
 		ID:          "foo",
-		Constraints: []*structs.Constraint{{Operand: "unique"}},
+		Constraints: []*structs.Constraint{{Operand: "distinctHosts"}},
 		TaskGroups:  []*structs.TaskGroup{tg1, tg2, tg3},
 	}
 
@@ -506,7 +506,7 @@ func TestDynamicConstraint_JobUnique_InfeasibleCount(t *testing.T) {
 	}
 }
 
-func TestDynamicConstraint_TaskGroupUnique(t *testing.T) {
+func TestDynamicConstraint_TaskGroupDistinctHosts(t *testing.T) {
 	_, ctx := testContext(t)
 	nodes := []*structs.Node{
 		mock.Node(),
@@ -514,11 +514,11 @@ func TestDynamicConstraint_TaskGroupUnique(t *testing.T) {
 	}
 	static := NewStaticIterator(ctx, nodes)
 
-	// Create a task group with a unique constraint.
+	// Create a task group with a distinctHosts constraint.
 	taskGroup := &structs.TaskGroup{
 		Name: "example",
 		Constraints: []*structs.Constraint{
-			{Operand: "unique"},
+			{Operand: "distinctHosts"},
 		},
 	}
diff --git a/website/source/docs/jobspec/index.html.md b/website/source/docs/jobspec/index.html.md
index 540ca1be2..3084b3b55 100644
--- a/website/source/docs/jobspec/index.html.md
+++ b/website/source/docs/jobspec/index.html.md
@@ -237,13 +237,17 @@ The `constraint` object supports the following keys:
   the attribute. This sets the operator to "regexp" and the `value` to the
   regular expression.
 
-* `unique` - Unique accepts a boolean value and can be used to mark a Job or
-  a Task Group as requiring placement on unique nodes. If the `unique`
-  constraint is placed on a Job, all of it's Task Groups must be placed on
-  unique nodes. If the `unique` constraint is placed on a Task Group, then
-  multiple instances of that Task Group must be placed on unique nodes. This
-  sets the operator to "unique" if `unique` is set to "true". If set to "false",
-  the constraint is ignored as this is the default behavior.
+* `distinctHosts` - `distinctHosts` accepts a boolean `true`. The default is
+  `false`.
+
+  When `distinctHosts` is `true` at the Job level, each instance of all Task
+  Groups specified in the job is placed on a separate host.
+
+  When `distinctHosts` is `true` at the Task Group level with count > 1, each
+  instance of a Task Group is placed on a separate host. Different task groups in
+  the same job _may_ be co-scheduled.
+
+  Tasks within a task group are always co-scheduled.
 
 Below is a table documenting the variables that can be interpreted:
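
For reference, a minimal job file exercising the renamed key might look like the sketch below. It simply mirrors the deleted `unique-constraint.hcl` fixture with the new `distinctHosts` key; the job name is illustrative, and per `parseConstraints` the value goes through `strconv.ParseBool`, so it is written as the string `"true"`.

```hcl
# Illustrative only: a job-level distinctHosts constraint asks the scheduler to
# place every Task Group instance of this job on a separate host.
job "foo" {
    constraint {
        distinctHosts = "true"
    }
}
```

Per the documentation change above, placing the same `constraint` block inside a Task Group instead scopes the behavior to that group's instances, while different task groups in the job may still be co-scheduled.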