Allow for in-place update when affinity or spread was changed (#25109)
Similarly to #6732, this removes the affinity and spread checks from the in-place update path. Both affinity and spread are soft preferences for the Nomad scheduler rather than strict constraints, so modifying them should not trigger reallocation of the job. Fixes #25070

Co-authored-by: Tim Gross <tgross@hashicorp.com>
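For reviewers who want to see the effect from outside the scheduler, here is a minimal sketch using the Go API client: it plans a job whose only change is a new affinity and prints the per-task-group update annotations. The job ID `example`, the local agent address, and the `ptr` helper are assumptions of this sketch, not part of the change.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

// ptr is a small local helper for optional numeric fields in the api package.
func ptr[T any](v T) *T { return &v }

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Fetch the currently registered job (the job ID is a placeholder).
	job, _, err := client.Jobs().Info("example", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Change only a soft placement preference.
	job.Affinities = append(job.Affinities, &api.Affinity{
		LTarget: "${node.datacenter}",
		RTarget: "dc1",
		Operand: "=",
		Weight:  ptr(int8(100)),
	})

	plan, _, err := client.Jobs().Plan(job, true, nil)
	if err != nil {
		log.Fatal(err)
	}
	if plan.Annotations == nil {
		log.Fatal("no plan annotations returned")
	}
	for tg, u := range plan.Annotations.DesiredTGUpdates {
		// With this fix, the change above counts toward InPlaceUpdate
		// rather than DestructiveUpdate.
		fmt.Printf("%s: in-place=%d destructive=%d\n", tg, u.InPlaceUpdate, u.DestructiveUpdate)
	}
}
```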
.changelog/25109.txt (new file, +3 lines)
@@ -0,0 +1,3 @@
+```release-note:bug
+scheduler: Fixed a bug that made affinity and spread updates destructive
+```
@@ -182,13 +182,13 @@ FieldsLoop:
     }

     // Object changes that can be done in-place are log configs, services,
-    // constraints.
+    // constraints, affinity or spread.

     if !destructive {
     ObjectsLoop:
         for _, oDiff := range diff.Objects {
             switch oDiff.Name {
-            case "Service", "Constraint":
+            case "Service", "Constraint", "Affinity", "Spread":
                 continue
             case "LogConfig":
                 for _, fDiff := range oDiff.Fields {
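For readers outside the scheduler package, the rule the switch above encodes can be summarized in a standalone sketch. The constant names mirror the annotations used in the test hunk below, but their string values here are illustrative, and the real code additionally inspects individual `LogConfig` fields rather than treating that object as unconditionally in-place.

```go
package main

import "fmt"

// Illustrative stand-ins for the scheduler's annotation constants.
const (
	annotationForcesInplaceUpdate     = "forces in-place update"
	annotationForcesDestructiveUpdate = "forces create/destroy update"
)

// desiredUpdate classifies a changed task object by its diff name, following
// the switch above: Affinity and Spread now join Service and Constraint as
// changes that do not force a destructive update.
func desiredUpdate(objectName string) string {
	switch objectName {
	case "Service", "Constraint", "Affinity", "Spread":
		return annotationForcesInplaceUpdate
	case "LogConfig":
		// Simplification: the real code checks which LogConfig fields changed.
		return annotationForcesInplaceUpdate
	default:
		return annotationForcesDestructiveUpdate
	}
}

func main() {
	for _, name := range []string{"Affinity", "Spread", "Artifact"} {
		fmt.Printf("%-8s -> %s\n", name, desiredUpdate(name))
	}
}
```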
@@ -309,6 +309,48 @@ func TestAnnotateTask(t *testing.T) {
             Parent: &structs.TaskGroupDiff{Type: structs.DiffTypeEdited},
             Desired: AnnotationForcesInplaceUpdate,
         },
+        {
+            Diff: &structs.TaskDiff{
+                Type: structs.DiffTypeEdited,
+                Objects: []*structs.ObjectDiff{
+                    {
+                        Type: structs.DiffTypeAdded,
+                        Name: "Affinity",
+                        Fields: []*structs.FieldDiff{
+                            {
+                                Type: structs.DiffTypeAdded,
+                                Name: "LTarget",
+                                Old: "",
+                                New: "baz",
+                            },
+                        },
+                    },
+                },
+            },
+            Parent: &structs.TaskGroupDiff{Type: structs.DiffTypeEdited},
+            Desired: AnnotationForcesInplaceUpdate,
+        },
+        {
+            Diff: &structs.TaskDiff{
+                Type: structs.DiffTypeEdited,
+                Objects: []*structs.ObjectDiff{
+                    {
+                        Type: structs.DiffTypeAdded,
+                        Name: "Spread",
+                        Fields: []*structs.FieldDiff{
+                            {
+                                Type: structs.DiffTypeAdded,
+                                Name: "LTarget",
+                                Old: "",
+                                New: "baz",
+                            },
+                        },
+                    },
+                },
+            },
+            Parent: &structs.TaskGroupDiff{Type: structs.DiffTypeEdited},
+            Desired: AnnotationForcesInplaceUpdate,
+        },
         {
             Diff: &structs.TaskDiff{
                 Type: structs.DiffTypeEdited,
@@ -233,16 +233,6 @@ func tasksUpdated(jobA, jobB *structs.Job, taskGroup string) comparison {
         return c
     }

-    // Check Affinities
-    if c := affinitiesUpdated(jobA, jobB, taskGroup); c.modified {
-        return c
-    }
-
-    // Check Spreads
-    if c := spreadsUpdated(jobA, jobB, taskGroup); c.modified {
-        return c
-    }
-
     // Check consul updated
     if c := consulUpdated(a.Consul, b.Consul); c.modified {
         return c
@@ -580,67 +570,6 @@ func networkPortMap(n *structs.NetworkResource) structs.AllocatedPorts {
     return m
 }

-func affinitiesUpdated(jobA, jobB *structs.Job, taskGroup string) comparison {
-    var affinitiesA structs.Affinities
-    var affinitiesB structs.Affinities
-
-    // accumulate job affinities
-
-    affinitiesA = append(affinitiesA, jobA.Affinities...)
-    affinitiesB = append(affinitiesB, jobB.Affinities...)
-
-    tgA := jobA.LookupTaskGroup(taskGroup)
-    tgB := jobB.LookupTaskGroup(taskGroup)
-
-    // append group level affinities
-
-    affinitiesA = append(affinitiesA, tgA.Affinities...)
-    affinitiesB = append(affinitiesB, tgB.Affinities...)
-
-    // append task level affinities for A
-
-    for _, task := range tgA.Tasks {
-        affinitiesA = append(affinitiesA, task.Affinities...)
-    }
-
-    // append task level affinities for B
-    for _, task := range tgB.Tasks {
-        affinitiesB = append(affinitiesB, task.Affinities...)
-    }
-
-    // finally check if all the affinities from both jobs match
-    if !affinitiesA.Equal(&affinitiesB) {
-        return difference("affinities", affinitiesA, affinitiesB)
-    }
-
-    return same
-}
-
-func spreadsUpdated(jobA, jobB *structs.Job, taskGroup string) comparison {
-    var spreadsA []*structs.Spread
-    var spreadsB []*structs.Spread
-
-    // accumulate job spreads
-
-    spreadsA = append(spreadsA, jobA.Spreads...)
-    spreadsB = append(spreadsB, jobB.Spreads...)
-
-    tgA := jobA.LookupTaskGroup(taskGroup)
-    tgB := jobB.LookupTaskGroup(taskGroup)
-
-    // append group spreads
-    spreadsA = append(spreadsA, tgA.Spreads...)
-    spreadsB = append(spreadsB, tgB.Spreads...)
-
-    if !slices.EqualFunc(spreadsA, spreadsB, func(a, b *structs.Spread) bool {
-        return a.Equal(b)
-    }) {
-        return difference("spreads", spreadsA, spreadsB)
-    }
-
-    return same
-}
-
 // renderTemplatesUpdated returns the difference in the RestartPolicy's
 // render_templates field, if set
 func renderTemplatesUpdated(a, b *structs.RestartPolicy, msg string) comparison {
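The two hunks above rely on the scheduler package's unexported `comparison` helper (`c.modified`, `difference`, `same`). A self-contained sketch of that pattern, with field names beyond `modified` assumed for illustration, shows the shape of the checks this change deletes:

```go
package main

import (
	"fmt"
	"slices"
)

// comparison mimics the scheduler's internal result type: the zero value
// means "no change"; difference records what changed and where. Only the
// usage visible in the diff (modified, difference, same) is taken from the
// source; the other field names are assumptions of this sketch.
type comparison struct {
	modified bool
	field    string
	a, b     any
}

var same = comparison{}

func difference(field string, a, b any) comparison {
	return comparison{modified: true, field: field, a: a, b: b}
}

// spreadsChanged shows the shape of the removed spreadsUpdated check, using
// plain strings in place of *structs.Spread values.
func spreadsChanged(a, b []string) comparison {
	if !slices.Equal(a, b) {
		return difference("spreads", a, b)
	}
	return same
}

func main() {
	fmt.Println(spreadsChanged([]string{"dc"}, []string{"dc"}).modified)   // false
	fmt.Println(spreadsChanged([]string{"dc"}, []string{"rack"}).modified) // true
}
```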
@@ -233,140 +233,6 @@ func TestShuffleNodes(t *testing.T) {

 }

-func TestTaskUpdatedAffinity(t *testing.T) {
-    ci.Parallel(t)
-
-    j1 := mock.Job()
-    j2 := mock.Job()
-    name := j1.TaskGroups[0].Name
-    must.False(t, tasksUpdated(j1, j2, name).modified)
-
-    // TaskGroup Affinity
-    j2.TaskGroups[0].Affinities = []*structs.Affinity{
-        {
-            LTarget: "node.datacenter",
-            RTarget: "dc1",
-            Operand: "=",
-            Weight: 100,
-        },
-    }
-    must.True(t, tasksUpdated(j1, j2, name).modified)
-
-    // TaskGroup Task Affinity
-    j3 := mock.Job()
-    j3.TaskGroups[0].Tasks[0].Affinities = []*structs.Affinity{
-        {
-            LTarget: "node.datacenter",
-            RTarget: "dc1",
-            Operand: "=",
-            Weight: 100,
-        },
-    }
-    must.True(t, tasksUpdated(j1, j3, name).modified)
-
-    j4 := mock.Job()
-    j4.TaskGroups[0].Tasks[0].Affinities = []*structs.Affinity{
-        {
-            LTarget: "node.datacenter",
-            RTarget: "dc1",
-            Operand: "=",
-            Weight: 100,
-        },
-    }
-    must.True(t, tasksUpdated(j1, j4, name).modified)
-
-    // check different level of same affinity
-    j5 := mock.Job()
-    j5.Affinities = []*structs.Affinity{
-        {
-            LTarget: "node.datacenter",
-            RTarget: "dc1",
-            Operand: "=",
-            Weight: 100,
-        },
-    }
-
-    j6 := mock.Job()
-    j6.Affinities = make([]*structs.Affinity, 0)
-    j6.TaskGroups[0].Affinities = []*structs.Affinity{
-        {
-            LTarget: "node.datacenter",
-            RTarget: "dc1",
-            Operand: "=",
-            Weight: 100,
-        },
-    }
-    must.False(t, tasksUpdated(j5, j6, name).modified)
-}
-
-func TestTaskUpdatedSpread(t *testing.T) {
-    ci.Parallel(t)
-
-    j1 := mock.Job()
-    j2 := mock.Job()
-    name := j1.TaskGroups[0].Name
-
-    must.False(t, tasksUpdated(j1, j2, name).modified)
-
-    // TaskGroup Spread
-    j2.TaskGroups[0].Spreads = []*structs.Spread{
-        {
-            Attribute: "node.datacenter",
-            Weight: 100,
-            SpreadTarget: []*structs.SpreadTarget{
-                {
-                    Value: "r1",
-                    Percent: 50,
-                },
-                {
-                    Value: "r2",
-                    Percent: 50,
-                },
-            },
-        },
-    }
-    must.True(t, tasksUpdated(j1, j2, name).modified)
-
-    // check different level of same constraint
-    j5 := mock.Job()
-    j5.Spreads = []*structs.Spread{
-        {
-            Attribute: "node.datacenter",
-            Weight: 100,
-            SpreadTarget: []*structs.SpreadTarget{
-                {
-                    Value: "r1",
-                    Percent: 50,
-                },
-                {
-                    Value: "r2",
-                    Percent: 50,
-                },
-            },
-        },
-    }
-
-    j6 := mock.Job()
-    j6.TaskGroups[0].Spreads = []*structs.Spread{
-        {
-            Attribute: "node.datacenter",
-            Weight: 100,
-            SpreadTarget: []*structs.SpreadTarget{
-                {
-                    Value: "r1",
-                    Percent: 50,
-                },
-                {
-                    Value: "r2",
-                    Percent: 50,
-                },
-            },
-        },
-    }
-
-    must.False(t, tasksUpdated(j5, j6, name).modified)
-}
-
 func TestTasksUpdated(t *testing.T) {
     ci.Parallel(t)

@@ -62,6 +62,10 @@ Operators can use weights to express relative preference across multiple affinit
 placement is still successful. This is different from [constraints][constraint] where placement is
 restricted only to nodes that meet the constraint's criteria.

+Updating the `affinity` block is non-destructive. Updating a job specification
+with only non-destructive updates will not migrate or replace existing
+allocations.
+
 ## `affinity` Parameters

 - `attribute` `(string: "")` - Specifies the name or reference of the attribute
@@ -62,6 +62,10 @@ Placing constraints at both the job level and at the group level is redundant
 since constraints are applied hierarchically. The job constraints will affect
 all groups (and tasks) in the job.

+Updating the `constraint` block is non-destructive. Updating a job specification
+with only non-destructive updates will not migrate or replace existing
+allocations.
+
 ## `constraint` Parameters

 - `attribute` `(string: "")` - Specifies the name or reference of the attribute
@@ -70,6 +70,10 @@ metadata][client-meta]. Additionally, spread may be specified at the [job][job]
 and [group][group] levels for ultimate flexibility. Job level spread criteria
 are inherited by all task groups in the job.

+Updating the `spread` block is non-destructive. Updating a job specification
+with only non-destructive updates will not migrate or replace existing
+allocations.
+
 ## `spread` Parameters

 - `attribute` `(string: "")` - Specifies the name or reference of the attribute
@@ -43,6 +43,15 @@ will now only be loaded when they have a corresponding
 [`plugin`](/nomad/docs/configuration/plugin) block in the agent configuration
 file.

+#### Affinity and spread updates are non-destructive
+
+In Nomad 1.10.0, a scheduler bug was fixed so that updates to `affinity` and
+`spread` blocks are no longer destructive. After a job update that changes only
+these blocks, existing allocations remain running with their job version
+incremented. If you were relying on the previous behavior to redistribute
+workloads, you can force a destructive update by changing fields that require
+one, such as the `meta` block.
+
 ## Nomad 1.9.5

 #### CNI plugins
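The upgrade note above mentions forcing a destructive update by changing a field that requires one, such as the `meta` block. A minimal sketch of that workaround via the Go API client; the job ID `example` and the `redeploy` meta key are placeholders of this sketch:

```go
package main

import (
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Fetch the currently registered job (the job ID is a placeholder).
	job, _, err := client.Jobs().Info("example", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Bump a meta key: changing meta requires replacing allocations, so this
	// forces a destructive update even if nothing else changed.
	if job.Meta == nil {
		job.Meta = map[string]string{}
	}
	job.Meta["redeploy"] = "2025-01-01T00:00:00Z"

	if _, _, err := client.Jobs().Register(job, nil); err != nil {
		log.Fatal(err)
	}
}
```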