More e2e tests

Preetha Appan
2018-03-29 11:35:45 -05:00
parent fefbdd3178
commit db2bdb4410
11 changed files with 265 additions and 73 deletions

View File

@@ -1,30 +1,35 @@
job "test1" {
datacenters = ["dc1"]
type = "service"
type = "service"
group "t1" {
count = 3
task "t1" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "lol 5000"]
command = "bash"
args = ["-c", "lol 5000"]
}
}
update {
max_parallel = 1
min_healthy_time = "10s"
auto_revert = false
max_parallel = 1
min_healthy_time = "10s"
auto_revert = false
}
restart {
attempts = 0
delay = "0s"
mode = "fail"
}
reschedule {
attempts = 0
interval = "5m"
delay = "0s"
mode = "fail"
}
reschedule {
attempts = 0
interval = "5m"
unlimited = false
}
}
}
}
}
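
For reference, the restart/reschedule pairing above can also be built programmatically against the Nomad Go API that the test file below already uses. A minimal sketch, assuming the api and helper packages from this repo; buildTest1Policies is a hypothetical name, not part of the commit:

    package main

    import (
        "fmt"
        "time"

        "github.com/hashicorp/nomad/api"
        "github.com/hashicorp/nomad/helper"
    )

    // buildTest1Policies mirrors the stanzas above: never restart a failed
    // task in place (attempts = 0, mode = "fail") and never reschedule it
    // to another node (attempts = 0, unlimited = false).
    func buildTest1Policies() (*api.RestartPolicy, *api.ReschedulePolicy) {
        restart := &api.RestartPolicy{
            Attempts: helper.IntToPtr(0),
            Delay:    helper.TimeToPtr(0),
            Mode:     helper.StringToPtr("fail"),
        }
        reschedule := &api.ReschedulePolicy{
            Attempts:  helper.IntToPtr(0),
            Interval:  helper.TimeToPtr(5 * time.Minute),
            Unlimited: helper.BoolToPtr(false),
        }
        return restart, reschedule
    }

    func main() {
        restart, reschedule := buildTest1Policies()
        // These could be attached to an api.TaskGroup's RestartPolicy and
        // ReschedulePolicy fields before registering the job.
        fmt.Printf("restart=%+v reschedule=%+v\n", restart, reschedule)
    }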

View File

@@ -1,25 +1,29 @@
job "test3" {
datacenters = ["dc1"]
type = "service"
type = "service"
group "t3" {
count = 3
task "t3" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "a=`cksum <<< \"${NOMAD_ALLOC_ID}\"| cut -d ' ' -f1`; if ! (( a % 2 )); then sleep 5000; else exit -1; fi"]
command = "bash"
args = ["-c", "a=`cksum <<< \"${NOMAD_ALLOC_ID}\"| cut -d ' ' -f1`; if ! (( a % 2 )); then sleep 5000; else exit -1; fi"]
}
}
restart {
attempts = 0
delay = "0s"
mode = "fail"
attempts = 0
delay = "0s"
mode = "fail"
}
reschedule {
attempts = 2
interval = "5m"
attempts = 2
interval = "5m"
unlimited = false
}
}
}
}
}
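
The bash one-liner above checksums NOMAD_ALLOC_ID and exits non-zero for odd checksums, so roughly half of the three allocs fail and exercise the two-attempt reschedule policy while the rest keep running. A sketch of the same gating idea in Go; crc32 is not the algorithm cksum(1) uses, only the even/odd split matters:

    package main

    import (
        "fmt"
        "hash/crc32"
        "os"
    )

    func main() {
        // Derive a number from the alloc ID and fail when it is odd,
        // mirroring the spec's cksum gate.
        a := crc32.ChecksumIEEE([]byte(os.Getenv("NOMAD_ALLOC_ID")))
        if a%2 != 0 {
            os.Exit(1)
        }
        fmt.Println("even checksum, staying alive")
        select {} // block forever, like the spec's sleep 5000
    }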

View File

@@ -1,31 +1,37 @@
job "test5" {
datacenters = ["dc1"]
type = "service"
type = "service"
group "t5" {
count = 3
task "t5" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "sleep 5000"]
command = "bash"
args = ["-c", "sleep 5000"]
}
}
update {
max_parallel = 1
canary = 1
min_healthy_time = "1s"
auto_revert = false
max_parallel = 1
canary = 1
min_healthy_time = "1s"
auto_revert = false
}
restart {
attempts = 0
delay = "0s"
mode = "fail"
attempts = 0
delay = "0s"
mode = "fail"
}
reschedule {
attempts = 3
interval = "5m"
attempts = 3
interval = "5m"
delay = "5s"
unlimited = false
}
}
}
}
}

View File

@@ -0,0 +1,34 @@
job "test" {
datacenters = ["dc1"]
type = "service"
group "t1" {
count = 3
task "t1" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "sleep 5000"]
}
}
update {
canary = 3
max_parallel = 1
min_healthy_time = "1s"
healthy_deadline = "1m"
auto_revert = true
}
restart {
attempts = 0
mode = "fail"
}
reschedule {
unlimited = "true"
}
}
}
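
The e2e harness drives specs like this one through the Go API. A minimal standalone sketch of parsing and registering it, assuming a local Nomad agent, that this is the input/rescheduling_canary_autorevert.hcl file the test below references, and that the jobspec package parses it the same way the suite does:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/nomad/api"
        "github.com/hashicorp/nomad/jobspec"
    )

    func main() {
        // Parse the HCL spec above into an api.Job.
        job, err := jobspec.ParseFile("input/rescheduling_canary_autorevert.hcl")
        if err != nil {
            log.Fatal(err)
        }
        // DefaultConfig targets http://127.0.0.1:4646 unless NOMAD_ADDR is set.
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }
        resp, _, err := client.Jobs().Register(job, nil)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("registration evaluation:", resp.EvalID)
    }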

View File

@@ -1,21 +1,23 @@
job "test" {
datacenters = ["dc1"]
type = "service"
type = "service"
group "t" {
count = 3
task "t" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "lol 5000"]
}
}
restart {
attempts = 0
delay = "0s"
mode = "fail"
}
}
}
}

View File

@@ -1,25 +1,29 @@
job "test2" {
datacenters = ["dc1"]
type = "service"
type = "service"
group "t2" {
count = 3
task "t2" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "lol 5000"]
command = "bash"
args = ["-c", "lol 5000"]
}
}
restart {
attempts = 0
delay = "0s"
mode = "fail"
attempts = 0
delay = "0s"
mode = "fail"
}
reschedule {
attempts = 2
interval = "5m"
attempts = 2
interval = "5m"
unlimited = false
}
}
}
}
}

View File

@@ -0,0 +1,35 @@
job "demo2" {
datacenters = ["dc1"]
type = "service"
group "t2" {
count = 3
task "t2" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "sleep 5000"]
}
}
update {
max_parallel = 1
min_healthy_time = "5s"
healthy_deadline = "10m"
auto_revert = false
}
restart {
attempts = 0
mode = "fail"
}
reschedule {
unlimited = "true"
# attempts = 0
}
}
}

View File

@@ -0,0 +1,35 @@
job "demo3" {
datacenters = ["dc1"]
type = "service"
group "t2" {
count = 3
task "t2" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "ssleep 5000"]
}
}
update {
max_parallel = 1
min_healthy_time = "5s"
healthy_deadline = "10m"
auto_revert = true
}
restart {
attempts = 0
mode = "fail"
}
reschedule {
unlimited = "true"
# attempts = 0
}
}
}
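
All of these specs get asserted on via allocation client statuses. A standalone sketch of the listing that the allocStatuses helper in the test file below wraps in Gomega assertions, assuming a local agent and that demo3 has been registered; clientStatuses is a hypothetical name:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/nomad/api"
    )

    // clientStatuses returns the client status of every allocation for a
    // job, the same shape the test helpers below produce.
    func clientStatuses(jobs *api.Jobs, jobID string) ([]string, error) {
        // true asks for all allocations, including stopped ones.
        allocs, _, err := jobs.Allocations(jobID, true, nil)
        if err != nil {
            return nil, err
        }
        var out []string
        for _, a := range allocs {
            out = append(out, a.ClientStatus)
        }
        return out, nil
    }

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }
        statuses, err := clientStatuses(client.Jobs(), "demo3")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(statuses) // e.g. [running running running]
    }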

View File

@@ -1,20 +1,23 @@
job "test" {
datacenters = ["dc1"]
type = "system"
type = "system"
group "t" {
count = 1
task "t" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "lol 5000"]
}
}
restart {
attempts = 0
delay = "0s"
mode = "fail"
}
}
}
}

View File

@@ -1,30 +1,35 @@
job "test4" {
datacenters = ["dc1"]
type = "service"
type = "service"
group "t4" {
count = 3
task "t4" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "sleep 5000"]
command = "bash"
args = ["-c", "sleep 5000"]
}
}
update {
max_parallel = 1
min_healthy_time = "10s"
auto_revert = false
max_parallel = 1
min_healthy_time = "10s"
auto_revert = false
}
restart {
attempts = 0
delay = "0s"
mode = "fail"
attempts = 0
delay = "0s"
mode = "fail"
}
reschedule {
attempts = 3
interval = "5m"
attempts = 3
interval = "5m"
unlimited = false
}
}
}
}
}

View File

@@ -12,16 +12,18 @@ import (
	"sort"

	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/structs"
)

var _ = Describe("Server Side Restart Tests", func() {
	var (
		jobs        *api.Jobs
		system      *api.System
		deployments *api.Deployments
		job         *api.Job
		err         error
		specFile    string
		// allocStatuses is a helper function that pulls
		// out client statuses from a slice of allocs
@@ -43,12 +45,27 @@ var _ = Describe("Server Side Restart Tests", func() {
			Expect(err).ShouldNot(HaveOccurred())
			var ret []string
			for _, a := range allocs {
				if (a.RescheduleTracker != nil && len(a.RescheduleTracker.Events) > 0) || a.FollowupEvalID != "" {
					ret = append(ret, a.ClientStatus)
				}
			}
			return ret
		}

		// deploymentStatus is a helper function that returns the status of
		// every deployment for the job, sorted by creation order
		deploymentStatus = func() []string {
			deploys, _, err := jobs.Deployments(*job.ID, nil)
			Expect(err).ShouldNot(HaveOccurred())
			var ret []string
			sort.Slice(deploys, func(i, j int) bool {
				return deploys[i].CreateIndex < deploys[j].CreateIndex
			})
			for _, d := range deploys {
				ret = append(ret, d.Status)
			}
			return ret
		}
	)
	BeforeSuite(func() {
@@ -59,6 +76,7 @@ var _ = Describe("Server Side Restart Tests", func() {
		Expect(err).ShouldNot(HaveOccurred())
		jobs = client.Jobs()
		system = client.System()
		deployments = client.Deployments()
	})
	JustBeforeEach(func() {
@@ -167,21 +185,62 @@ var _ = Describe("Server Side Restart Tests", func() {
		BeforeEach(func() {
			specFile = "input/rescheduling_canary.hcl"
		})

		It("Should have running allocs and successful deployment", func() {
			Eventually(allocStatuses, 3*time.Second, time.Second).Should(
				ConsistOf([]string{"running", "running", "running"}))

			time.Sleep(2 * time.Second) // TODO(preetha) figure out why this wasn't working with ginkgo constructs
			Eventually(deploymentStatus(), 2*time.Second, time.Second).Should(
				ContainElement(structs.DeploymentStatusSuccessful))
		})

		Context("Updating job to make allocs fail", func() {
			It("Should have no rescheduled allocs", func() {
				job.TaskGroups[0].Tasks[0].Config["args"] = []string{"-c", "lol"}
				_, _, err := jobs.Register(job, nil)
				Expect(err).ShouldNot(HaveOccurred())
				Eventually(allocStatusesRescheduled, 2*time.Second, time.Second).Should(BeEmpty())

				// Verify the new deployment and its status
				time.Sleep(3 * time.Second) // TODO(preetha) figure out why this wasn't working with ginkgo constructs
				Eventually(deploymentStatus(), 2*time.Second, time.Second).Should(
					ContainElement(structs.DeploymentStatusFailed))
			})
		})
	})
Context("Reschedule with canary and auto revert ", func() {
BeforeEach(func() {
specFile = "input/rescheduling_canary_autorevert.hcl"
})
It("Should have running allocs and successful deployment", func() {
Eventually(allocStatuses, 3*time.Second, time.Second).Should(
ConsistOf([]string{"running", "running", "running"}))
time.Sleep(4 * time.Second)
Eventually(deploymentStatus(), 2*time.Second, time.Second).Should(
ContainElement(structs.DeploymentStatusSuccessful))
// Make an update that causes the job to fail
job.TaskGroups[0].Tasks[0].Config["args"] = []string{"-c", "lol"}
_, _, err := jobs.Register(job, nil)
Expect(err).ShouldNot(HaveOccurred())
Eventually(allocStatusesRescheduled, 2*time.Second, time.Second).Should(BeEmpty())
// Wait for the revert
Eventually(allocStatuses, 3*time.Second, time.Second).Should(
ConsistOf([]string{"failed", "failed", "failed", "running", "running", "running"}))
// Verify new deployment and its status
// There should be one successful, one failed, and one more successful (after revert)
time.Sleep(5 * time.Second) //TODO(preetha) figure out why this wasn't working with ginkgo constructs
Eventually(deploymentStatus(), 2*time.Second, time.Second).Should(
ConsistOf(structs.DeploymentStatusSuccessful, structs.DeploymentStatusFailed, structs.DeploymentStatusSuccessful))
})
})
})
})
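
A plausible explanation for the time.Sleep workarounds and their TODOs: Gomega's Eventually only polls its subject when it is given a function, and these assertions pass the result of deploymentStatus(), so the status slice is computed once, before the deployments have transitioned. Under that assumption, the polling form with a longer window would make the fixed sleeps unnecessary; a sketch, not part of the commit:

    // Pass the function itself (no parentheses) so Gomega re-invokes it
    // every interval until the matcher succeeds or the timeout elapses.
    Eventually(deploymentStatus, 10*time.Second, time.Second).Should(
        ContainElement(structs.DeploymentStatusSuccessful))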