Merge pull request #5776 from hashicorp/b-flaky-e2e-test
flaky e2e test
@@ -7,7 +7,8 @@ The `terraform` folder has provisioning code to spin up a Nomad cluster on AWS.

Local Development
=================

The workflow when developing end to end tests locally is to run the provisioning step described below once, and then run the tests as described below.

When making local changes, use `./bin/update $(which nomad) /usr/local/bin/nomad` and `./bin/run sudo systemctl restart nomad` to destructively modify the provisioned cluster.
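For illustration only, a typical local iteration with these helpers might look like the sketch below, run from the `e2e` directory against an already provisioned cluster. The final `nomad version` check is just a hypothetical sanity check; the test invocation itself is the one described later in this README.

```
# copy a locally built binary to every node, then restart the agents
./bin/update $(which nomad) /usr/local/bin/nomad
./bin/run sudo systemctl restart nomad

# hypothetical sanity check that every node now reports the new build
./bin/run nomad version
```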
Provisioning
============
15  e2e/bin/run  Executable file
@@ -0,0 +1,15 @@
#!/bin/bash

set -e

if [ "$1" == "" ]; then
  echo "./bin/run commands..."
  exit 1
fi

nodes=$(terraform output -json -state=terraform/terraform.tfstate | jq -r '(.clients,.servers).value[]')
for node in $nodes
do
  echo Executing: ssh -i terraform/keys/*.pem ubuntu@$node "$@"
  ssh -o StrictHostKeyChecking=accept-new -i terraform/keys/*.pem ubuntu@$node "$@"
done
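As a quick usage sketch, any command that works over SSH can be fanned out to every client and server; `uptime` here is only an illustrative choice:

```
# run an arbitrary command on every node in the provisioned cluster
./bin/run uptime
```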
17  e2e/bin/update  Executable file
@@ -0,0 +1,17 @@
#!/bin/bash

if [ $# -ne 2 ]; then
  echo "./bin/update <source> <destination>"
  exit 1
fi

set -e

nodes=$(terraform output -json -state=terraform/terraform.tfstate | jq -r '(.clients,.servers).value[]')
for node in $nodes
do
  echo Executing: scp -C -i terraform/keys/*.pem "$1" ubuntu@$node:"$2"
  # Copy via /tmp and sudo mv into place, since the ubuntu user may not be able to write to the destination.
  scp -o StrictHostKeyChecking=accept-new -C -i terraform/keys/*.pem "$1" ubuntu@$node:/tmp/uploaded
  ssh -i terraform/keys/*.pem ubuntu@$node sudo mv /tmp/uploaded "$2"
done
@@ -1,9 +1,11 @@
package deployment

import (
	"github.com/hashicorp/nomad/api"
	"fmt"

	"github.com/hashicorp/nomad/e2e/framework"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/require"

	"github.com/hashicorp/nomad/e2e/e2eutil"
@@ -34,29 +36,35 @@ func (tc *DeploymentTest) BeforeAll(f *framework.F) {
func (tc *DeploymentTest) TestDeploymentAutoPromote(f *framework.F) {
t := f.T()
nomadClient := tc.Nomad()
run := structs.DeploymentStatusRunning
uuid := uuid.Generate()
// unique each run, cluster could have previous jobs
jobId := "deployment" + uuid[0:8]
tc.jobIds = append(tc.jobIds, jobId)
e2eutil.RegisterAndWaitForAllocs(t, nomadClient, "deployment/input/deployment_auto0.nomad", jobId)
ds := e2eutil.DeploymentsForJob(t, nomadClient, jobId)
require.Equal(t, 1, len(ds))
deploy := ds[0]

// Upgrade
e2eutil.RegisterAllocs(t, nomadClient, "deployment/input/deployment_auto1.nomad", jobId)
var deploy *api.Deployment
ds, _, err := nomadClient.Deployments().List(nil)
require.NoError(t, err)

// Find the deployment
for _, d := range ds {
if d.JobID == jobId {
deploy = d
break
// Find the deployment we don't already have
testutil.WaitForResult(func() (bool, error) {
ds = e2eutil.DeploymentsForJob(t, nomadClient, jobId)
for _, d := range ds {
if d.ID != deploy.ID {
deploy = d
return true, nil
}
}
}
return false, fmt.Errorf("missing update deployment for job %s", jobId)
}, func(e error) {
require.NoError(t, e)
})

// Deployment is auto pending the upgrade of "two" which has a longer time to health
run := structs.DeploymentStatusRunning
require.Equal(t, run, deploy.Status)
require.Equal(t, structs.DeploymentStatusDescriptionRunningAutoPromotion, deploy.StatusDescription)
e2eutil.WaitForDeployment(t, nomadClient, deploy.ID, run, structs.DeploymentStatusDescriptionRunningAutoPromotion)

// Deployment is eventually running
e2eutil.WaitForDeployment(t, nomadClient, deploy.ID, run, structs.DeploymentStatusDescriptionRunning)
@@ -111,6 +111,20 @@ func WaitForAllocRunning(t *testing.T, nomadClient *api.Client, allocID string)
})
}

func DeploymentsForJob(t *testing.T, nomadClient *api.Client, jobID string) []*api.Deployment {
ds, _, err := nomadClient.Deployments().List(nil)
require.NoError(t, err)

out := []*api.Deployment{}
for _, d := range ds {
if d.JobID == jobID {
out = append(out, d)
}
}

return out
}

func WaitForDeployment(t *testing.T, nomadClient *api.Client, deployID string, status string, statusDesc string) {
testutil.WaitForResultRetries(retries, func() (bool, error) {
time.Sleep(time.Millisecond * 100)
@@ -123,10 +137,10 @@ func WaitForDeployment(t *testing.T, nomadClient *api.Client, deployID string, s
return true, nil
}
return false, fmt.Errorf("expected status %s \"%s\", but got: %s \"%s\"",
deploy.Status,
deploy.StatusDescription,
status,
statusDesc,
deploy.Status,
deploy.StatusDescription,
)

}, func(err error) {
@@ -69,7 +69,7 @@ web 0 0 3 0 0 0
Latest Deployment
ID          = 9fa81f27
Status      = running
Description = Deployment is running but requires promotion
Description = Deployment is running but requires manual promotion

Deployed
Task Group  Promoted  Desired  Canaries  Placed  Healthy  Unhealthy
@@ -157,7 +157,7 @@ web 0 0 3 0 0 0
Latest Deployment
ID          = a6b87a6c
Status      = running
Description = Deployment is running but requires promotion
Description = Deployment is running but requires manual promotion

Deployed
Task Group  Promoted  Desired  Canaries  Placed  Healthy  Unhealthy
@@ -202,7 +202,7 @@ web 0 0 3 0 0 0
Latest Deployment
ID          = a6b87a6c
Status      = running
Description = Deployment is running but requires promotion
Description = Deployment is running but requires manual promotion

Deployed
Task Group  Promoted  Desired  Canaries  Placed  Healthy  Unhealthy
@@ -59,7 +59,7 @@ ID = 0b23b149
Job ID      = example
Job Version = 1
Status      = running
Description = Deployment is running but requires promotion
Description = Deployment is running but requires manual promotion

Deployed
Task Group  Promoted  Desired  Canaries  Placed  Healthy  Unhealthy
@@ -44,7 +44,7 @@ List the deployment for a particular job:

```
$ nomad job deployments example
ID        Job ID   Job Version  Status      Description
0b23b149  example  1            running     Deployment is running but requires promotion
0b23b149  example  1            running     Deployment is running but requires manual promotion
06ca68a2  example  0            successful  Deployment completed successfully
```
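For context on the wording change above: a deployment in this state waits for an operator to promote the canaries by hand. An illustrative invocation, using the deployment ID from the listing (not part of this diff), would be:

```
$ nomad deployment promote 0b23b149
```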
@@ -126,7 +126,7 @@ api 0 0 10 0 0 0
Latest Deployment
ID          = 32a080c1
Status      = running
Description = Deployment is running but requires promotion
Description = Deployment is running but requires manual promotion

Deployed
Task Group  Auto Revert  Promoted  Desired  Canaries  Placed  Healthy  Unhealthy

@@ -385,7 +385,7 @@ api 0 0 6 0 0 0
Latest Deployment
ID          = 32a080c1
Status      = running
Description = Deployment is running but requires promotion
Description = Deployment is running but requires manual promotion

Deployed
Task Group  Auto Revert  Promoted  Desired  Canaries  Placed  Healthy  Unhealthy