Mirror of https://github.com/kemko/nomad.git, synced 2026-01-06 10:25:42 +03:00
Merge pull request #3430 from hashicorp/f-e2e-migration-test
Add e2e test for testing migrations
20  e2e/migrations/Dockerfile  Normal file
@@ -0,0 +1,20 @@
FROM ubuntu:17.10

RUN apt-get update -y

RUN apt-get install -y \
    build-essential \
    git \
    golang \
    liblxc1

ENV GOPATH=$HOME/gopkg
ENV PATH=$PATH:$GOPATH/bin:/usr/local/lib

COPY nomad /bin/nomad

RUN mkdir -p /nomad/data && \
    mkdir -p /etc/nomad && \
    mkdir -p gopkg/src/github.com/nomad

RUN go get github.com/stretchr/testify/assert
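Note that the `COPY nomad /bin/nomad` step expects a pre-built Linux `nomad` binary to already be present in the Docker build context (this e2e/migrations directory). A minimal sketch of providing one, assuming the repository's root package cross-compiles with a plain `go build` run from the repo root (hypothetical helper commands, not part of this change):

    GOOS=linux go build -o e2e/migrations/nomad .
    ./e2e/migrations/docker-init.sh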
18  e2e/migrations/README.md  Normal file
@@ -0,0 +1,18 @@
## End-to-end tests for migrating data in sticky volumes

These tests run in a Docker container to ensure proper setup/teardown.

To create the testing image:
`./docker-init.sh`

To run the tests:
`./docker-run.sh`

TODO:
1. Specify how many servers/clients to run in the test.
2. Have a callback to specify the client options.
3. Run servers/clients in the Docker container and return IP addresses for each
instance, but have the test itself run on the host.
4. There should be a 1:1 mapping from container to agent, rather than running
the entire cluster in a single container.
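Both scripts are thin wrappers: the image provides the toolchain and an isolated environment, and the run script simply executes the Go suite with the `-integration` flag. Assuming Go and a `nomad` binary are already available locally, a roughly equivalent direct invocation from this directory would be:

`go test -integration`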
18  e2e/migrations/client1.hcl  Normal file
@@ -0,0 +1,18 @@
log_level = "DEBUG"

data_dir = "/tmp/client1"

datacenter = "dc1"

client {
  enabled = true
  servers = ["127.0.0.1:4647"]
  meta {
    secondary = 1
  }
}

ports {
  http = 5656
}
18  e2e/migrations/client2.hcl  Normal file
@@ -0,0 +1,18 @@
log_level = "DEBUG"

data_dir = "/tmp/client2"

datacenter = "dc1"

client {
  enabled = true
  servers = ["127.0.0.1:4647"]
  meta {
    secondary = 0
  }
}

ports {
  http = 5657
}
2  e2e/migrations/docker-init.sh  Executable file
@@ -0,0 +1,2 @@
docker build -t nomad-e2e .
7  e2e/migrations/docker-run.sh  Executable file
@@ -0,0 +1,7 @@
CURRENT_DIRECTORY=`pwd`
ROOT_DIRECTORY="$( dirname "$(dirname "$CURRENT_DIRECTORY")")"

docker run --privileged -v \
  $ROOT_DIRECTORY:/gopkg/src/github.com/hashicorp/nomad \
  -it nomad-e2e /bin/bash \
  -c "cd gopkg/src/github.com/hashicorp/nomad/e2e/migrations && go test -integration"
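The run script mounts the repository root at the container's Go import path and then runs every test in the migrations package. While iterating, the final command could be narrowed to a single test with the standard `go test` run filter, for example:

    go test -integration -run TestJobMigrations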
200  e2e/migrations/migrations_test.go  Normal file
@@ -0,0 +1,200 @@
package e2e

import (
	"bytes"
	"flag"
	"io/ioutil"
	"os"
	"os/exec"
	"strings"
	"testing"

	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/assert"
)

var integration = flag.Bool("integration", false, "run integration tests")

// sleepJobOne writes a file into its sticky, migratable ephemeral disk and
// then sleeps; it is constrained to the client with meta.secondary = 1.
const sleepJobOne = `job "sleep" {
	type = "batch"
	datacenters = ["dc1"]
	constraint {
		attribute = "${meta.secondary}"
		value = 1
	}
	group "group1" {
		restart {
			mode = "fail"
		}
		count = 1
		ephemeral_disk {
			migrate = true
			sticky = true
		}
		task "sleep" {
			template {
				data = "hello world"
				destination = "/local/hello-world"
			}
			driver = "exec"
			config {
				command = "/bin/sleep"
				args = [ "infinity" ]
			}
		}
	}
}`

// sleepJobTwo checks that the file written by sleepJobOne is present; it is
// constrained to the other client (meta.secondary = 0).
const sleepJobTwo = `job "sleep" {
	type = "batch"
	datacenters = ["dc1"]
	constraint {
		attribute = "${meta.secondary}"
		value = 0
	}
	group "group1" {
		restart {
			mode = "fail"
		}
		count = 1
		ephemeral_disk {
			migrate = true
			sticky = true
		}
		task "sleep" {
			driver = "exec"

			config {
				command = "test"
				args = [ "-f", "/local/hello-world" ]
			}
		}
	}
}`

// isSuccess waits until a given keyword is not present in the output of a
// command. For example, isSuccess will poll for a given time period as long as
// the output of the command "nomad node-status" includes the keyword
// "initializing". The absence of this keyword means the command has returned
// successfully.
func isSuccess(execCmd *exec.Cmd, retries int, keyword string) (string, error) {
	var successOut string
	var err error

	testutil.WaitForResultRetries(2000, func() (bool, error) {
		var out bytes.Buffer
		cmd := *execCmd
		cmd.Stdout = &out
		err := cmd.Run()

		if err != nil {
			return false, err
		}

		success := (out.String() != "" && !strings.Contains(out.String(), keyword))
		if !success {
			out.Reset()
			return false, err
		}

		successOut = out.String()
		return true, nil
	}, func(cmd_err error) {
		err = cmd_err
	})

	return successOut, err
}

// allNodesAreReady attempts to query the status of a cluster a specific number
// of times
func allNodesAreReady(retries int) (string, error) {
	cmd := exec.Command("nomad", "node-status")
	return isSuccess(cmd, retries, "initializing")
}

// jobIsReady attempts to query the status of a specific job a fixed number of
// times
func jobIsReady(retries int, jobName string) (string, error) {
	cmd := exec.Command("nomad", "job", "status", jobName)
	return isSuccess(cmd, retries, "pending")
}

// startCluster will create a running cluster, given a list of agent config
// files. In order to have a complete cluster, at least one server and one
// client config file should be included.
func startCluster(clusterConfig []string) (func(), error) {
	cmds := make([]*exec.Cmd, 0)

	for _, agentConfig := range clusterConfig {
		cmd := exec.Command("nomad", "agent", "-config", agentConfig)
		err := cmd.Start()

		if err != nil {
			return func() {}, err
		}

		cmds = append(cmds, cmd)
	}

	f := func() {
		for _, cmd := range cmds {
			cmd.Process.Kill()
		}
	}

	return f, nil
}

// TestJobMigrations starts a single-server, two-client cluster, runs a job
// that writes data into its sticky ephemeral disk on one client, then
// resubmits the job constrained to the other client and asserts that it
// completes, which only happens if the data was migrated.
func TestJobMigrations(t *testing.T) {
	flag.Parse()
	if !*integration {
		t.Skip("skipping test in non-integration mode.")
	}

	t.Parallel()
	assert := assert.New(t)

	clusterConfig := []string{"server.hcl", "client1.hcl", "client2.hcl"}
	stopCluster, err := startCluster(clusterConfig)
	assert.Nil(err)
	defer stopCluster()

	_, err = allNodesAreReady(10)
	assert.Nil(err)

	fh, err := ioutil.TempFile("", "nomad-sleep-1")
	assert.Nil(err)

	defer os.Remove(fh.Name())
	_, err = fh.WriteString(sleepJobOne)
	assert.Nil(err)

	jobCmd := exec.Command("nomad", "run", fh.Name())
	err = jobCmd.Run()
	assert.Nil(err)

	firstJobOutput, err := jobIsReady(20, "sleep")
	assert.Nil(err)
	assert.NotContains(firstJobOutput, "failed")
	assert.NotContains(firstJobOutput, "pending")

	fh2, err := ioutil.TempFile("", "nomad-sleep-2")
	assert.Nil(err)

	defer os.Remove(fh2.Name())
	_, err = fh2.WriteString(sleepJobTwo)
	assert.Nil(err)

	secondJobCmd := exec.Command("nomad", "run", fh2.Name())
	err = secondJobCmd.Run()
	assert.Nil(err)

	jobOutput, err := jobIsReady(20, "sleep")
	assert.Nil(err)
	assert.NotContains(jobOutput, "failed")
	assert.NotContains(jobOutput, "pending")
	assert.Contains(jobOutput, "complete")
}
10  e2e/migrations/server.hcl  Normal file
@@ -0,0 +1,10 @@
log_level = "DEBUG"

data_dir = "/tmp/server1"

server {
  enabled = true

  bootstrap_expect = 1
}