new e2e test for spread, and refactor affinity tests to share util methods

Preetha Appan
2018-12-19 21:25:32 -06:00
parent e514d0a617
commit 7ad040166d
7 changed files with 85 additions and 170 deletions


@@ -1,16 +1,10 @@
package affinities
import (
"time"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/e2e/framework"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/jobspec"
"github.com/stretchr/testify/require"
. "github.com/onsi/gomega"
"github.com/hashicorp/nomad/e2e/e2eutil"
)
type BasicAffinityTest struct {
@@ -30,45 +24,14 @@ func init() {
func (tc *BasicAffinityTest) BeforeAll(f *framework.F) {
// Ensure cluster has leader before running tests
framework.WaitForLeader(f.T(), tc.Nomad())
}
func (tc *BasicAffinityTest) registerAndWaitForAllocs(f *framework.F, jobFile string, prefix string) []*api.AllocationListStub {
nomadClient := tc.Nomad()
// Parse job
job, err := jobspec.ParseFile(jobFile)
require := require.New(f.T())
require.Nil(err)
uuid := uuid.Generate()
jobId := helper.StringToPtr(prefix + uuid[0:8])
job.ID = jobId
tc.jobIds = append(tc.jobIds, *jobId)
// Register job
jobs := nomadClient.Jobs()
resp, _, err := jobs.Register(job, nil)
require.Nil(err)
require.NotEmpty(resp.EvalID)
g := NewGomegaWithT(f.T())
// Wrap in retry to wait until placement
g.Eventually(func() []*api.AllocationListStub {
// Look for allocations
allocs, _, _ := jobs.Allocations(*job.ID, false, nil)
return allocs
}, 10*time.Second, time.Second).ShouldNot(BeEmpty())
allocs, _, err := jobs.Allocations(*job.ID, false, nil)
require.Nil(err)
return allocs
e2eutil.WaitForLeader(f.T(), tc.Nomad())
}
func (tc *BasicAffinityTest) TestSingleAffinities(f *framework.F) {
allocs := tc.registerAndWaitForAllocs(f, "affinities/input/single_affinity.nomad", "aff")
nomadClient := tc.Nomad()
jobID, allocs := e2eutil.RegisterAndWaitForAllocs(f, nomadClient, "affinities/input/single_affinity.nomad", "aff")
tc.jobIds = append(tc.jobIds, jobID)
jobAllocs := nomadClient.Allocations()
require := require.New(f.T())
// Verify affinity score metadata
@@ -87,9 +50,10 @@ func (tc *BasicAffinityTest) TestSingleAffinities(f *framework.F) {
}
func (tc *BasicAffinityTest) TestMultipleAffinities(f *framework.F) {
allocs := tc.registerAndWaitForAllocs(f, "affinities/input/multiple_affinities.nomad", "aff")
nomadClient := tc.Nomad()
jobID, allocs := e2eutil.RegisterAndWaitForAllocs(f, nomadClient, "affinities/input/multiple_affinities.nomad", "aff")
tc.jobIds = append(tc.jobIds, jobID)
jobAllocs := nomadClient.Allocations()
require := require.New(f.T())
@@ -126,9 +90,10 @@ func (tc *BasicAffinityTest) TestMultipleAffinities(f *framework.F) {
}
func (tc *BasicAffinityTest) TestAntiAffinities(f *framework.F) {
allocs := tc.registerAndWaitForAllocs(f, "affinities/input/anti_affinities.nomad", "aff")
nomadClient := tc.Nomad()
jobID, allocs := e2eutil.RegisterAndWaitForAllocs(f, nomadClient, "affinities/input/anti_affinities.nomad", "aff")
tc.jobIds = append(tc.jobIds, jobID)
jobAllocs := nomadClient.Allocations()
require := require.New(f.T())

e2e/e2eutil/utils.go (new file)

@@ -0,0 +1,57 @@
package e2eutil
import (
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/e2e/framework"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/jobspec"
"github.com/hashicorp/nomad/testutil"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/require"
"testing"
"time"
)
// retries is used to control how many times to retry checking if the cluster has a leader yet
const retries = 500
func WaitForLeader(t *testing.T, nomadClient *api.Client) {
statusAPI := nomadClient.Status()
testutil.WaitForResultRetries(retries, func() (bool, error) {
leader, err := statusAPI.Leader()
return leader != "", err
}, func(err error) {
t.Fatalf("failed to find leader: %v", err)
})
}
func RegisterAndWaitForAllocs(f *framework.F, nomadClient *api.Client, jobFile string, prefix string) (string, []*api.AllocationListStub) {
// Parse job
job, err := jobspec.ParseFile(jobFile)
require := require.New(f.T())
require.Nil(err)
uuid := uuid.Generate()
jobId := helper.StringToPtr(prefix + uuid[0:8])
job.ID = jobId
// Register job
jobs := nomadClient.Jobs()
resp, _, err := jobs.Register(job, nil)
require.Nil(err)
require.NotEmpty(resp.EvalID)
g := NewGomegaWithT(f.T())
// Wrap in retry to wait until placement
g.Eventually(func() []*api.AllocationListStub {
// Look for allocations
allocs, _, _ := jobs.Allocations(*job.ID, false, nil)
return allocs
}, 10*time.Second, time.Second).ShouldNot(BeEmpty())
allocs, _, err := jobs.Allocations(*job.ID, false, nil)
require.Nil(err)
return *jobId, allocs
}
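
For context, this is the pattern the refactored affinity and spread suites now follow when consuming the two shared helpers above. It is an illustrative sketch only: the suite name, test name, job file path, log line, and the deregister loop in AfterEach are invented for the example (the real suites' cleanup bodies are elided from this diff), but the helper signatures and framework calls match the code in this commit.

package example

import (
	"github.com/hashicorp/nomad/e2e/e2eutil"
	"github.com/hashicorp/nomad/e2e/framework"
)

// ExampleTest is a hypothetical suite showing the shared-helper pattern.
type ExampleTest struct {
	framework.TC
	jobIds []string
}

func (tc *ExampleTest) BeforeAll(f *framework.F) {
	// Ensure the cluster has a leader before running any test cases.
	e2eutil.WaitForLeader(f.T(), tc.Nomad())
}

func (tc *ExampleTest) TestPlacement(f *framework.F) {
	nomadClient := tc.Nomad()

	// Register the job and block until its allocations are placed; the
	// returned job ID is recorded so AfterEach can clean it up.
	jobID, allocs := e2eutil.RegisterAndWaitForAllocs(f, nomadClient,
		"example/input/example.nomad", "ex")
	tc.jobIds = append(tc.jobIds, jobID)

	f.T().Logf("job %s placed %d allocations", jobID, len(allocs))
}

func (tc *ExampleTest) AfterEach(f *framework.F) {
	nomadClient := tc.Nomad()
	jobs := nomadClient.Jobs()

	// Stop (purge) every job registered during the test, then garbage collect.
	for _, id := range tc.jobIds {
		jobs.Deregister(id, true, nil)
	}
	tc.jobIds = []string{}
	nomadClient.System().GarbageCollect()
}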


@@ -1,21 +0,0 @@
package framework
import (
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/testutil"
"testing"
)
// retries is used to control how many times to retry checking if the cluster has a leader yet
const retries = 500
func WaitForLeader(t *testing.T, nomadClient *api.Client) {
statusAPI := nomadClient.Status()
testutil.WaitForResultRetries(retries, func() (bool, error) {
leader, err := statusAPI.Leader()
return leader != "", err
}, func(err error) {
t.Fatalf("failed to find leader: %v", err)
})
}


@@ -1,24 +0,0 @@
job "test1" {
datacenters = ["dc1", "dc2"]
type = "service"
affinity {
attribute ="${meta.rack}"
operator = "="
value = "r1"
weight = -50
}
group "test1" {
count = 4
spread {
attribute ="${node.datacenter}"
}
task "test" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "sleep 15000"]
}
}
}
}
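
The new job files the spread tests point at (spread/input/even_spread.nomad and spread/input/multiple_spread.nomad) are not shown in this excerpt. Purely as an illustration of the kind of jobspec TestMultipleSpreads below exercises — and not the committed files' contents — a group spread over both the datacenter and a rack meta attribute could look like the following, modeled on the deleted spread1.nomad above; the weights and count are invented.

job "multi_spread_example" {
  datacenters = ["dc1", "dc2"]
  type        = "service"

  group "example" {
    count = 10

    # Spread allocations evenly across datacenters...
    spread {
      attribute = "${node.datacenter}"
      weight    = 50
    }

    # ...and across racks exposed through client meta.
    spread {
      attribute = "${meta.rack}"
      weight    = 50
    }

    task "sleep" {
      driver = "raw_exec"

      config {
        command = "bash"
        args    = ["-c", "sleep 15000"]
      }
    }
  }
}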


@@ -1,20 +1,13 @@
package spread
import (
"time"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/e2e/framework"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/jobspec"
"github.com/stretchr/testify/require"
. "github.com/onsi/gomega"
"testing"
"github.com/hashicorp/nomad/e2e/e2eutil"
)
type BasicSpreadStruct struct {
type SpreadTest struct {
framework.TC
jobIds []string
}
@@ -24,45 +17,24 @@ func init() {
Component: "Spread",
CanRunLocal: true,
Cases: []framework.TestCase{
new(BasicSpreadStruct),
new(SpreadTest),
},
})
}
func (tc *BasicSpreadStruct) TestEvenSpread(f *framework.F) {
func (tc *SpreadTest) BeforeAll(f *framework.F) {
// Ensure cluster has leader before running tests
e2eutil.WaitForLeader(f.T(), tc.Nomad())
}
func (tc *SpreadTest) TestEvenSpread(f *framework.F) {
nomadClient := tc.Nomad()
// Parse job
job, err := jobspec.ParseFile("spread/input/spread1.nomad")
require := require.New(f.T())
require.Nil(err)
uuid := uuid.Generate()
jobId := "spr" + uuid[0:8]
job.ID = helper.StringToPtr(jobId)
tc.jobIds = append(tc.jobIds, jobId)
// Register job
jobs := nomadClient.Jobs()
resp, _, err := jobs.Register(job, nil)
require.Nil(err)
require.NotEmpty(resp.EvalID)
g := NewGomegaWithT(f.T())
// Wrap in retry to wait until placement
g.Eventually(func() []*api.AllocationListStub {
// Look for allocations
allocs, _, _ := jobs.Allocations(*job.ID, false, nil)
return allocs
}, 2*time.Second, time.Second).ShouldNot(BeEmpty())
jobID, allocs := e2eutil.RegisterAndWaitForAllocs(f, nomadClient, "spread/input/even_spread.nomad", "spr")
tc.jobIds = append(tc.jobIds, jobID)
jobAllocs := nomadClient.Allocations()
allocs, _, _ := jobs.Allocations(*job.ID, false, nil)
dcToAllocs := make(map[string]int)
require := require.New(f.T())
// Verify spread score and alloc distribution
for _, allocStub := range allocs {
alloc, _, err := jobAllocs.Info(allocStub.ID, nil)
@@ -83,41 +55,16 @@ func (tc *BasicSpreadStruct) TestEvenSpread(f *framework.F) {
require.Equal(expectedDcToAllocs, dcToAllocs)
}
func (tc *BasicSpreadStruct) TestMultipleSpreads(f *framework.F) {
func (tc *SpreadTest) TestMultipleSpreads(f *framework.F) {
nomadClient := tc.Nomad()
// Parse job
job, err := jobspec.ParseFile("spread/input/spread2.nomad")
require := require.New(f.T())
require.Nil(err)
uuid := uuid.Generate()
jobId := "spr" + uuid[0:8]
job.ID = helper.StringToPtr(jobId)
tc.jobIds = append(tc.jobIds, jobId)
// Register job
jobs := nomadClient.Jobs()
resp, _, err := jobs.Register(job, nil)
require.Nil(err)
require.NotEmpty(resp.EvalID)
g := NewGomegaWithT(f.T())
// Wrap in retry to wait until placement
g.Eventually(func() []*api.AllocationListStub {
// Look for allocations
allocs, _, _ := jobs.Allocations(*job.ID, false, nil)
return allocs
}, 2*time.Second, time.Second).ShouldNot(BeEmpty())
jobID, allocs := e2eutil.RegisterAndWaitForAllocs(f, nomadClient, "spread/input/multiple_spread.nomad", "spr")
tc.jobIds = append(tc.jobIds, jobID)
jobAllocs := nomadClient.Allocations()
allocs, _, _ := jobs.Allocations(*job.ID, false, nil)
dcToAllocs := make(map[string]int)
rackToAllocs := make(map[string]int)
require := require.New(f.T())
// Verify spread score and alloc distribution
for _, allocStub := range allocs {
alloc, _, err := jobAllocs.Info(allocStub.ID, nil)
@@ -151,7 +98,7 @@ func (tc *BasicSpreadStruct) TestMultipleSpreads(f *framework.F) {
}
func (tc *BasicSpreadStruct) AfterEach(f *framework.F) {
func (tc *SpreadTest) AfterEach(f *framework.F) {
nomadClient := tc.Nomad()
jobs := nomadClient.Jobs()
// Stop all jobs in test
@@ -161,12 +108,3 @@ func (tc *BasicSpreadStruct) AfterEach(f *framework.F) {
// Garbage collect
nomadClient.System().GarbageCollect()
}
func TestCalledFromGoTest(t *testing.T) {
framework.New().AddSuites(&framework.TestSuite{
Component: "foo",
Cases: []framework.TestCase{
new(BasicSpreadStruct),
},
}).Run(t)
}