e2e: correction to assertions in system scheduler tests (#26875)

Piotr Kazmierczak authored on 2025-10-02 21:10:46 +02:00, committed by GitHub
parent 696ad4789e
commit b22b7ab273
2 changed files with 17 additions and 28 deletions
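The corrected tests derive their expected canary counts from the job's own initial allocations rather than from a separately computed count of eligible nodes. For the 50% canary case that means half of the initial allocations, rounded up; the parenthesization matters, because converting to float64 only after an integer division truncates before math.Ceil can round anything. A minimal, self-contained sketch of that arithmetic (the expectedCanaries helper is illustrative and not part of the commit):

    package main

    import (
    	"fmt"
    	"math"
    )

    // expectedCanaries returns 50% of n rounded up, matching the intent of the
    // updated assertion in testCanaryUpdate. Dividing after the float64
    // conversion is what makes math.Ceil meaningful; float64(n/2) would
    // truncate first and leave nothing to round.
    func expectedCanaries(n int) int {
    	return int(math.Ceil(float64(n) / 2))
    }

    func main() {
    	for _, n := range []int{1, 3, 4, 7} {
    		fmt.Printf("initial allocs: %d -> expected canaries: %d\n", n, expectedCanaries(n))
    	}
    }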


@@ -17,7 +17,7 @@ job "system_job" {
       min_healthy_time = "1s"
       healthy_deadline = "1m"
       auto_revert = false
-      canary = 50
+      canary = 100
     }
     restart {

@@ -5,6 +5,7 @@ package scheduler_system
 import (
 	"context"
+	"math"
 	"testing"
 	"time"
@@ -93,13 +94,17 @@ func testJobUpdateOnIneligbleNode(t *testing.T) {
 }
 
 func testCanaryUpdate(t *testing.T) {
-	_, cleanup := jobs3.Submit(t,
+	job, cleanup := jobs3.Submit(t,
 		"./input/system_canary_v0.nomad.hcl",
 		jobs3.DisableRandomJobID(),
 		jobs3.Timeout(60*time.Second),
 	)
 	t.Cleanup(cleanup)
 
+	// Get initial allocations
+	initialAllocs := job.Allocs()
+	must.SliceNotEmpty(t, initialAllocs)
+
 	// Update job
 	job2, cleanup2 := jobs3.Submit(t,
 		"./input/system_canary_v1.nomad.hcl",
@@ -109,19 +114,6 @@ func testCanaryUpdate(t *testing.T) {
 	)
 	t.Cleanup(cleanup2)
 
-	// how many eligible nodes do we have?
-	nodesApi := job2.NodesApi()
-	nodesList, _, err := nodesApi.List(nil)
-	must.Nil(t, err)
-	must.SliceNotEmpty(t, nodesList)
-
-	numberOfEligibleNodes := 0
-	for _, n := range nodesList {
-		if n.SchedulingEligibility == api.NodeSchedulingEligible {
-			numberOfEligibleNodes += 1
-		}
-	}
-
 	// Get updated allocations
 	allocs := job2.Allocs()
 	must.SliceNotEmpty(t, allocs)
@@ -152,7 +144,7 @@ func testCanaryUpdate(t *testing.T) {
 	})
 
 	// find allocations from v1 version of the job, they should all be canaries
-	// and there should be exactly 2
+	// and there should be exactly 50% (rounded up) of v0 allocations
 	count := 0
 	for _, a := range allocs {
 		if a.JobVersion == 1 {
@@ -160,7 +152,7 @@ func testCanaryUpdate(t *testing.T) {
 			count += 1
 		}
 	}
-	must.Eq(t, numberOfEligibleNodes/2, count, must.Sprint("expected canaries to be placed on 50% of eligible nodes"))
+	must.Eq(t, int(math.Ceil(float64(len(initialAllocs))/2)), count, must.Sprint("expected canaries to be placed on 50% of feasible nodes"))
 
 	// promote canaries
 	deployments, _, err := deploymentsApi.List(nil)
@@ -192,7 +184,7 @@ func testCanaryUpdate(t *testing.T) {
 	})
 
 	// expect the number of allocations for promoted deployment to be the same
-	// as the number of eligible nodes
+	// as the number of initial allocations
 	newAllocs := job2.Allocs()
 	must.SliceNotEmpty(t, newAllocs)
@@ -202,17 +194,21 @@ func testCanaryUpdate(t *testing.T) {
 			promotedAllocs += 1
 		}
 	}
-	must.Eq(t, numberOfEligibleNodes, promotedAllocs)
+	must.Eq(t, len(initialAllocs), promotedAllocs)
 }
 
 func testCanaryDeploymentToAllEligibleNodes(t *testing.T) {
-	_, cleanup := jobs3.Submit(t,
+	job, cleanup := jobs3.Submit(t,
 		"./input/system_canary_v0_100.nomad.hcl",
 		jobs3.DisableRandomJobID(),
 		jobs3.Timeout(60*time.Second),
 	)
 	t.Cleanup(cleanup)
 
+	// Get initial allocations
+	initialAllocs := job.Allocs()
+	must.SliceNotEmpty(t, initialAllocs)
+
 	// Update job
 	job2, cleanup2 := jobs3.Submit(t,
 		"./input/system_canary_v1_100.nomad.hcl",
@@ -228,13 +224,6 @@ func testCanaryDeploymentToAllEligibleNodes(t *testing.T) {
 	must.Nil(t, err)
 	must.SliceNotEmpty(t, nodesList)
 
-	numberOfEligibleNodes := 0
-	for _, n := range nodesList {
-		if n.SchedulingEligibility == api.NodeSchedulingEligible {
-			numberOfEligibleNodes += 1
-		}
-	}
-
 	// Get updated allocations
 	allocs := job2.Allocs()
 	must.SliceNotEmpty(t, allocs)
@@ -272,7 +261,7 @@ func testCanaryDeploymentToAllEligibleNodes(t *testing.T) {
 			count += 1
 		}
 	}
-	must.Eq(t, numberOfEligibleNodes, count, must.Sprint("expected canaries to be placed on all eligible nodes"))
+	must.Eq(t, len(initialAllocs), count, must.Sprint("expected canaries to be placed on all eligible nodes"))
 
 	// deployment must not be terminal and needs to have the right status
 	// description set
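
Taken together, the two tests now tie their expectations to the initial (v0) allocation count: testCanaryUpdate expects canaries on 50% of those allocations, rounded up, and testCanaryDeploymentToAllEligibleNodes expects one canary per initial allocation, which is also the allocation count expected after promotion. A small sketch generalizing that relationship, assuming the canary value in these system jobs is treated as a percentage of feasible placements (the expectedCanaryCount helper is illustrative, not code from the commit):

    package main

    import (
    	"fmt"
    	"math"
    )

    // expectedCanaryCount mirrors the corrected assertions: canaries are
    // canaryPercent% of the initial allocation count, rounded up. With 100
    // the result is simply the initial allocation count.
    func expectedCanaryCount(initialAllocs, canaryPercent int) int {
    	return int(math.Ceil(float64(initialAllocs*canaryPercent) / 100))
    }

    func main() {
    	fmt.Println(expectedCanaryCount(5, 50))  // 50% case: 3 canaries
    	fmt.Println(expectedCanaryCount(5, 100)) // 100% case: 5 canaries
    }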