Parallel client tests (#2890)

* alloc_runner

* Random tests

* Parallel task_runner and no exec-compatible check

* Parallel client

* Fail fast and use random ports

* Fix docker port mapping

* Make concurrent pull less timing dependent

* Increase parallelism

* Fixes

* Don't build chroots in parallel on Travis

* Reduce parallelism on Travis with lxc/rkt

* Make the Java test app not run forever

* drop parallelism a little

* Use Docker ports outside the OS's ephemeral port range

* Limit parallelism even more on Travis

* Increase the rkt UUID deadline
Alex Dadgar authored on 2017-07-22 19:04:36 -07:00, committed by GitHub
parent 5d4b0ab016
commit 08c2ba9bc6
23 changed files with 407 additions and 217 deletions
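The diffs below repeat two patterns. First, tests opt into Go's parallel runner everywhere except Travis, whose constrained resources made parallel runs flaky. Second, fixed or atomically incremented test ports are replaced with random ports drawn from below the OS's ephemeral range, so test listeners collide neither with each other nor with outgoing connections. A minimal sketch of both patterns, assuming nomad's testutil helpers (TestSomething is a placeholder name):

package client

import (
	"math/rand"
	"testing"

	tu "github.com/hashicorp/nomad/testutil"
)

func TestSomething(t *testing.T) {
	// Run in parallel except on Travis, where heavy parallelism is flaky.
	if !tu.IsTravis() {
		t.Parallel()
	}
	// ... test body ...
}

// getPort picks a random port below the OS's ephemeral range (which starts
// at 32768 on Linux) so test listeners don't collide with outgoing sockets.
func getPort() int {
	return 1030 + int(rand.Int31n(6440))
}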

View File

@@ -20,7 +20,6 @@ import (
"github.com/kr/pretty"
"github.com/hashicorp/nomad/client/config"
ctestutil "github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/client/vaultclient"
)
@@ -66,11 +65,16 @@ func testAllocRunnerFromAlloc(alloc *structs.Allocation, restarts bool) (*MockAl
}
func testAllocRunner(restarts bool) (*MockAllocStateUpdater, *AllocRunner) {
return testAllocRunnerFromAlloc(mock.Alloc(), restarts)
// Use mock driver
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
task.Config["run_for"] = "500ms"
return testAllocRunnerFromAlloc(alloc, restarts)
}
func TestAllocRunner_SimpleRun(t *testing.T) {
ctestutil.ExecCompatible(t)
t.Parallel()
upd, ar := testAllocRunner(false)
go ar.Run()
defer ar.Destroy()
@@ -91,7 +95,7 @@ func TestAllocRunner_SimpleRun(t *testing.T) {
// Test that the watcher will mark the allocation as unhealthy.
func TestAllocRunner_DeploymentHealth_Unhealthy_BadStart(t *testing.T) {
ctestutil.ExecCompatible(t)
t.Parallel()
// Ensure the task fails and restarts
upd, ar := testAllocRunner(false)
@@ -129,7 +133,7 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_BadStart(t *testing.T) {
// Test that the watcher will mark the allocation as unhealthy if it hits its
// deadline.
func TestAllocRunner_DeploymentHealth_Unhealthy_Deadline(t *testing.T) {
ctestutil.ExecCompatible(t)
t.Parallel()
// Ensure the task fails and restarts
upd, ar := testAllocRunner(false)
@@ -168,7 +172,7 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_Deadline(t *testing.T) {
// Test that the watcher will mark the allocation as healthy.
func TestAllocRunner_DeploymentHealth_Healthy_NoChecks(t *testing.T) {
ctestutil.ExecCompatible(t)
t.Parallel()
// Ensure the task fails and restarts
upd, ar := testAllocRunner(false)
@@ -216,7 +220,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_NoChecks(t *testing.T) {
// Test that the watcher will mark the allocation as healthy with checks
func TestAllocRunner_DeploymentHealth_Healthy_Checks(t *testing.T) {
ctestutil.ExecCompatible(t)
t.Parallel()
// Ensure the task fails and restarts
upd, ar := testAllocRunner(false)
@@ -285,7 +289,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_Checks(t *testing.T) {
// Test that the watcher will mark the allocation as healthy.
func TestAllocRunner_DeploymentHealth_Healthy_UpdatedDeployment(t *testing.T) {
ctestutil.ExecCompatible(t)
t.Parallel()
// Ensure the task fails and restarts
upd, ar := testAllocRunner(false)
@@ -346,7 +350,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_UpdatedDeployment(t *testing.T) {
// retrying fetching an artifact, other tasks in the group should be able
// to proceed.
func TestAllocRunner_RetryArtifact(t *testing.T) {
ctestutil.ExecCompatible(t)
t.Parallel()
alloc := mock.Alloc()
alloc.Job.Type = structs.JobTypeBatch
@@ -365,7 +369,7 @@ func TestAllocRunner_RetryArtifact(t *testing.T) {
badtask := alloc.Job.TaskGroups[0].Tasks[0].Copy()
badtask.Name = "bad"
badtask.Artifacts = []*structs.TaskArtifact{
{GetterSource: "http://127.1.1.111:12315/foo/bar/baz"},
{GetterSource: "http://127.0.0.1:0/foo/bar/baz"},
}
alloc.Job.TaskGroups[0].Tasks = append(alloc.Job.TaskGroups[0].Tasks, badtask)
@@ -404,6 +408,7 @@ func TestAllocRunner_RetryArtifact(t *testing.T) {
}
func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
t.Parallel()
upd, ar := testAllocRunner(false)
// Ensure task takes some time
@@ -502,6 +507,7 @@ func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
}
func TestAllocRunner_Destroy(t *testing.T) {
t.Parallel()
upd, ar := testAllocRunner(false)
// Ensure task takes some time
@@ -557,6 +563,7 @@ func TestAllocRunner_Destroy(t *testing.T) {
}
func TestAllocRunner_Update(t *testing.T) {
t.Parallel()
_, ar := testAllocRunner(false)
// Deep copy the alloc to avoid races when updating
@@ -583,6 +590,7 @@ func TestAllocRunner_Update(t *testing.T) {
}
func TestAllocRunner_SaveRestoreState(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -657,6 +665,7 @@ func TestAllocRunner_SaveRestoreState(t *testing.T) {
}
func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
t.Parallel()
upd, ar := testAllocRunner(false)
ar.logger = prefixedTestLogger("ar1: ")
@@ -779,6 +788,7 @@ func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
// TestAllocRunner_SaveRestoreState_Upgrade asserts that pre-0.6 exec tasks are
// restarted on upgrade.
func TestAllocRunner_SaveRestoreState_Upgrade(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -876,6 +886,7 @@ func TestAllocRunner_SaveRestoreState_Upgrade(t *testing.T) {
// "AllocID": "2a54fcff-fc44-8d4f-e025-53c48e9cbbbb"
// }
func TestAllocRunner_RestoreOldState(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -994,6 +1005,7 @@ func TestAllocRunner_RestoreOldState(t *testing.T) {
}
func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
t.Parallel()
upd, ar := testAllocRunner(false)
// Create two tasks in the task group
@@ -1061,6 +1073,7 @@ func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
}
func TestAllocRunner_TaskLeader_KillTG(t *testing.T) {
t.Parallel()
upd, ar := testAllocRunner(false)
// Create two tasks in the task group
@@ -1134,6 +1147,7 @@ func TestAllocRunner_TaskLeader_KillTG(t *testing.T) {
// TestAllocRunner_TaskLeader_StopTG asserts that when stopping a task group
// with a leader the leader is stopped before other tasks.
func TestAllocRunner_TaskLeader_StopTG(t *testing.T) {
t.Parallel()
upd, ar := testAllocRunner(false)
// Create 3 tasks in the task group
@@ -1218,6 +1232,7 @@ func TestAllocRunner_TaskLeader_StopTG(t *testing.T) {
}
func TestAllocRunner_MoveAllocDir(t *testing.T) {
t.Parallel()
// Create an alloc runner
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
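The recurring change in this file swaps the exec driver for Nomad's in-process mock driver, so the tests no longer need root or an exec-compatible OS and can safely call t.Parallel(). Consolidated from the hunk above, the resulting helper reads:

func testAllocRunner(restarts bool) (*MockAllocStateUpdater, *AllocRunner) {
	// Use the mock driver: it runs in-process with no OS dependencies.
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config["run_for"] = "500ms" // task exits on its own quickly
	return testAllocRunnerFromAlloc(alloc, restarts)
}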

View File

@@ -7,17 +7,18 @@ import (
"io"
"io/ioutil"
"log"
"math/rand"
"net"
"os"
"path/filepath"
"runtime"
"sync/atomic"
"testing"
"time"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/fingerprint"
"github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
@@ -28,35 +29,19 @@ import (
ctestutil "github.com/hashicorp/nomad/client/testutil"
)
var (
nextPort uint32 = 16000
osExecDriverSupport = map[string]bool{
"linux": true,
}
)
func getPort() int {
return int(atomic.AddUint32(&nextPort, 1))
return 1030 + int(rand.Int31n(6440))
}
func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
f := false
// Setup the default settings
config := nomad.DefaultConfig()
config.VaultConfig.Enabled = &f
config.VaultConfig.Enabled = helper.BoolToPtr(false)
config.Build = "unittest"
config.DevMode = true
config.RPCAddr = &net.TCPAddr{
IP: []byte{127, 0, 0, 1},
Port: getPort(),
}
config.NodeName = fmt.Sprintf("Node %d", config.RPCAddr.Port)
// Tighten the Serf timing
config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"
config.SerfConfig.MemberlistConfig.BindPort = getPort()
config.SerfConfig.MemberlistConfig.SuspicionMult = 2
config.SerfConfig.MemberlistConfig.RetransmitMult = 2
config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
@@ -70,33 +55,52 @@ func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
config.RaftConfig.StartAsLeader = true
config.RaftTimeout = 500 * time.Millisecond
logger := log.New(config.LogOutput, "", log.LstdFlags)
catalog := consul.NewMockCatalog(logger)
// Invoke the callback if any
if cb != nil {
cb(config)
}
logger := log.New(config.LogOutput, "", log.LstdFlags)
catalog := consul.NewMockCatalog(logger)
for i := 10; i >= 0; i-- {
config.RPCAddr = &net.TCPAddr{
IP: []byte{127, 0, 0, 1},
Port: getPort(),
}
config.NodeName = fmt.Sprintf("Node %d", config.RPCAddr.Port)
config.SerfConfig.MemberlistConfig.BindPort = getPort()
// Create server
server, err := nomad.NewServer(config, catalog, logger)
if err != nil {
t.Fatalf("err: %v", err)
// Create server
server, err := nomad.NewServer(config, catalog, logger)
if err == nil {
return server, config.RPCAddr.String()
} else if i == 0 {
t.Fatalf("err: %v", err)
} else {
wait := time.Duration(rand.Int31n(2000)) * time.Millisecond
time.Sleep(wait)
}
}
return server, config.RPCAddr.String()
return nil, ""
}
func testClient(t *testing.T, cb func(c *config.Config)) *Client {
f := false
conf := config.DefaultConfig()
conf.VaultConfig.Enabled = &f
conf.VaultConfig.Enabled = helper.BoolToPtr(false)
conf.DevMode = true
conf.Node = &structs.Node{
Reserved: &structs.Resources{
DiskMB: 0,
},
}
// Tighten the fingerprinter timeouts
if conf.Options == nil {
conf.Options = make(map[string]string)
}
conf.Options[fingerprint.TightenNetworkTimeoutsConfig] = "true"
if cb != nil {
cb(conf)
}
@@ -113,6 +117,7 @@ func testClient(t *testing.T, cb func(c *config.Config)) *Client {
}
func TestClient_StartStop(t *testing.T) {
t.Parallel()
client := testClient(t, nil)
if err := client.Shutdown(); err != nil {
t.Fatalf("err: %v", err)
@@ -120,6 +125,7 @@ func TestClient_StartStop(t *testing.T) {
}
func TestClient_RPC(t *testing.T) {
t.Parallel()
s1, addr := testServer(t, nil)
defer s1.Shutdown()
@@ -139,6 +145,7 @@ func TestClient_RPC(t *testing.T) {
}
func TestClient_RPC_Passthrough(t *testing.T) {
t.Parallel()
s1, _ := testServer(t, nil)
defer s1.Shutdown()
@@ -158,6 +165,7 @@ func TestClient_RPC_Passthrough(t *testing.T) {
}
func TestClient_Fingerprint(t *testing.T) {
t.Parallel()
c := testClient(t, nil)
defer c.Shutdown()
@@ -172,6 +180,7 @@ func TestClient_Fingerprint(t *testing.T) {
}
func TestClient_HasNodeChanged(t *testing.T) {
t.Parallel()
c := testClient(t, nil)
defer c.Shutdown()
@@ -203,6 +212,7 @@ func TestClient_HasNodeChanged(t *testing.T) {
}
func TestClient_Fingerprint_InWhitelist(t *testing.T) {
t.Parallel()
c := testClient(t, func(c *config.Config) {
if c.Options == nil {
c.Options = make(map[string]string)
@@ -220,6 +230,7 @@ func TestClient_Fingerprint_InWhitelist(t *testing.T) {
}
func TestClient_Fingerprint_InBlacklist(t *testing.T) {
t.Parallel()
c := testClient(t, func(c *config.Config) {
if c.Options == nil {
c.Options = make(map[string]string)
@@ -237,6 +248,7 @@ func TestClient_Fingerprint_InBlacklist(t *testing.T) {
}
func TestClient_Fingerprint_OutOfWhitelist(t *testing.T) {
t.Parallel()
c := testClient(t, func(c *config.Config) {
if c.Options == nil {
c.Options = make(map[string]string)
@@ -253,6 +265,7 @@ func TestClient_Fingerprint_OutOfWhitelist(t *testing.T) {
}
func TestClient_Fingerprint_WhitelistBlacklistCombination(t *testing.T) {
t.Parallel()
c := testClient(t, func(c *config.Config) {
if c.Options == nil {
c.Options = make(map[string]string)
@@ -281,63 +294,46 @@ func TestClient_Fingerprint_WhitelistBlacklistCombination(t *testing.T) {
}
}
func TestClient_Drivers(t *testing.T) {
c := testClient(t, nil)
defer c.Shutdown()
node := c.Node()
if node.Attributes["driver.exec"] == "" {
if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
t.Fatalf("missing exec driver")
} else {
t.Skipf("missing exec driver, no OS support")
}
}
}
func TestClient_Drivers_InWhitelist(t *testing.T) {
t.Parallel()
c := testClient(t, func(c *config.Config) {
if c.Options == nil {
c.Options = make(map[string]string)
}
// Weird spacing to test trimming
c.Options["driver.whitelist"] = " exec , foo "
c.Options["driver.raw_exec.enable"] = "1"
c.Options["driver.whitelist"] = " raw_exec , foo "
})
defer c.Shutdown()
node := c.Node()
if node.Attributes["driver.exec"] == "" {
if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
t.Fatalf("missing exec driver")
} else {
t.Skipf("missing exec driver, no OS support")
}
if node.Attributes["driver.raw_exec"] == "" {
t.Fatalf("missing raw_exec driver")
}
}
func TestClient_Drivers_InBlacklist(t *testing.T) {
t.Parallel()
c := testClient(t, func(c *config.Config) {
if c.Options == nil {
c.Options = make(map[string]string)
}
// Weird spacing to test trimming
c.Options["driver.blacklist"] = " exec , foo "
c.Options["driver.raw_exec.enable"] = "1"
c.Options["driver.blacklist"] = " raw_exec , foo "
})
defer c.Shutdown()
node := c.Node()
if node.Attributes["driver.exec"] != "" {
if v, ok := osExecDriverSupport[runtime.GOOS]; !v && ok {
t.Fatalf("exec driver loaded despite blacklist")
} else {
t.Skipf("missing exec driver, no OS support")
}
if node.Attributes["driver.raw_exec"] != "" {
t.Fatalf("raw_exec driver loaded despite blacklist")
}
}
func TestClient_Drivers_OutOfWhitelist(t *testing.T) {
t.Parallel()
c := testClient(t, func(c *config.Config) {
if c.Options == nil {
c.Options = make(map[string]string)
@@ -354,6 +350,7 @@ func TestClient_Drivers_OutOfWhitelist(t *testing.T) {
}
func TestClient_Drivers_WhitelistBlacklistCombination(t *testing.T) {
t.Parallel()
c := testClient(t, func(c *config.Config) {
if c.Options == nil {
c.Options = make(map[string]string)
@@ -379,6 +376,7 @@ func TestClient_Drivers_WhitelistBlacklistCombination(t *testing.T) {
// TestClient_MixedTLS asserts that when a server is running with TLS enabled
// it will reject any RPC connections from clients that lack TLS. See #2525
func TestClient_MixedTLS(t *testing.T) {
t.Parallel()
const (
cafile = "../helper/tlsutil/testdata/ca.pem"
foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
@@ -425,6 +423,7 @@ func TestClient_MixedTLS(t *testing.T) {
// enabled -- but their certificates are signed by different CAs -- they're
// unable to communicate.
func TestClient_BadTLS(t *testing.T) {
t.Parallel()
const (
cafile = "../helper/tlsutil/testdata/ca.pem"
foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
@@ -479,6 +478,7 @@ func TestClient_BadTLS(t *testing.T) {
}
func TestClient_Register(t *testing.T) {
t.Parallel()
s1, _ := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -510,6 +510,7 @@ func TestClient_Register(t *testing.T) {
}
func TestClient_Heartbeat(t *testing.T) {
t.Parallel()
s1, _ := testServer(t, func(c *nomad.Config) {
c.MinHeartbeatTTL = 50 * time.Millisecond
})
@@ -543,6 +544,7 @@ func TestClient_Heartbeat(t *testing.T) {
}
func TestClient_UpdateAllocStatus(t *testing.T) {
t.Parallel()
s1, _ := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -592,6 +594,7 @@ func TestClient_UpdateAllocStatus(t *testing.T) {
}
func TestClient_WatchAllocs(t *testing.T) {
t.Parallel()
ctestutil.ExecCompatible(t)
s1, _ := testServer(t, nil)
defer s1.Shutdown()
@@ -690,6 +693,7 @@ func waitTilNodeReady(client *Client, t *testing.T) {
}
func TestClient_SaveRestoreState(t *testing.T) {
t.Parallel()
ctestutil.ExecCompatible(t)
s1, _ := testServer(t, nil)
defer s1.Shutdown()
@@ -783,6 +787,7 @@ func TestClient_SaveRestoreState(t *testing.T) {
}
func TestClient_Init(t *testing.T) {
t.Parallel()
dir, err := ioutil.TempDir("", "nomad")
if err != nil {
t.Fatalf("err: %s", err)
@@ -806,6 +811,7 @@ func TestClient_Init(t *testing.T) {
}
func TestClient_BlockedAllocations(t *testing.T) {
t.Parallel()
s1, _ := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -911,6 +917,7 @@ func TestClient_BlockedAllocations(t *testing.T) {
}
func TestClient_UnarchiveAllocDir(t *testing.T) {
t.Parallel()
dir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("err: %v", err)
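Since random ports can collide, testServer above now fails fast and retries server creation with fresh ports and jittered backoff instead of dying on the first bind error. Because the hunk interleaves the old and new versions, here is the retry loop reconstructed on its own (config, catalog, and logger are set up earlier in testServer):

for i := 10; i >= 0; i-- {
	// Pick fresh random ports on every attempt.
	config.RPCAddr = &net.TCPAddr{
		IP:   []byte{127, 0, 0, 1},
		Port: getPort(),
	}
	config.NodeName = fmt.Sprintf("Node %d", config.RPCAddr.Port)
	config.SerfConfig.MemberlistConfig.BindPort = getPort()

	// Create server
	server, err := nomad.NewServer(config, catalog, logger)
	if err == nil {
		return server, config.RPCAddr.String()
	} else if i == 0 {
		t.Fatalf("err: %v", err)
	} else {
		// Jittered backoff so colliding tests don't retry in lockstep.
		wait := time.Duration(rand.Int31n(2000)) * time.Millisecond
		time.Sleep(wait)
	}
}
return nil, ""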

View File

@@ -178,6 +178,7 @@ func (h *testHarness) stop() {
}
func TestTaskTemplateManager_Invalid(t *testing.T) {
t.Parallel()
hooks := NewMockTaskHooks()
var tmpls []*structs.Template
region := "global"
@@ -235,6 +236,7 @@ func TestTaskTemplateManager_Invalid(t *testing.T) {
}
func TestTaskTemplateManager_HostPath(t *testing.T) {
t.Parallel()
// Make a template that will render immediately and write it to a tmp file
f, err := ioutil.TempFile("", "")
if err != nil {
@@ -288,6 +290,7 @@ func TestTaskTemplateManager_HostPath(t *testing.T) {
}
func TestTaskTemplateManager_Unblock_Static(t *testing.T) {
t.Parallel()
// Make a template that will render immediately
content := "hello, world!"
file := "my.tmpl"
@@ -321,6 +324,7 @@ func TestTaskTemplateManager_Unblock_Static(t *testing.T) {
}
func TestTaskTemplateManager_Permissions(t *testing.T) {
t.Parallel()
// Make a template that will render immediately
content := "hello, world!"
file := "my.tmpl"
@@ -355,6 +359,7 @@ func TestTaskTemplateManager_Permissions(t *testing.T) {
}
func TestTaskTemplateManager_Unblock_Static_NomadEnv(t *testing.T) {
t.Parallel()
// Make a template that will render immediately
content := `Hello Nomad Task: {{env "NOMAD_TASK_NAME"}}`
expected := fmt.Sprintf("Hello Nomad Task: %s", TestTaskName)
@@ -389,6 +394,7 @@ func TestTaskTemplateManager_Unblock_Static_NomadEnv(t *testing.T) {
}
func TestTaskTemplateManager_Unblock_Static_AlreadyRendered(t *testing.T) {
t.Parallel()
// Make a template that will render immediately
content := "hello, world!"
file := "my.tmpl"
@@ -429,6 +435,7 @@ func TestTaskTemplateManager_Unblock_Static_AlreadyRendered(t *testing.T) {
}
func TestTaskTemplateManager_Unblock_Consul(t *testing.T) {
t.Parallel()
// Make a template that will render based on a key in Consul
key := "foo"
content := "barbaz"
@@ -477,6 +484,7 @@ func TestTaskTemplateManager_Unblock_Consul(t *testing.T) {
}
func TestTaskTemplateManager_Unblock_Vault(t *testing.T) {
t.Parallel()
// Make a template that will render based on a key in Vault
vaultPath := "secret/password"
key := "password"
@@ -527,6 +535,7 @@ func TestTaskTemplateManager_Unblock_Vault(t *testing.T) {
}
func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) {
t.Parallel()
// Make a template that will render immediately
staticContent := "hello, world!"
staticFile := "my.tmpl"
@@ -595,6 +604,7 @@ func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) {
}
func TestTaskTemplateManager_Rerender_Noop(t *testing.T) {
t.Parallel()
// Make a template that will render based on a key in Consul
key := "foo"
content1 := "bar"
@@ -666,6 +676,7 @@ func TestTaskTemplateManager_Rerender_Noop(t *testing.T) {
}
func TestTaskTemplateManager_Rerender_Signal(t *testing.T) {
t.Parallel()
// Make a template that renders based on a key in Consul and sends SIGALRM
key1 := "foo"
content1_1 := "bar"
@@ -765,6 +776,7 @@ OUTER:
}
func TestTaskTemplateManager_Rerender_Restart(t *testing.T) {
t.Parallel()
// Make a template that renders based on a key in Consul and sends restart
key1 := "bam"
content1_1 := "cat"
@@ -831,6 +843,7 @@ OUTER:
}
func TestTaskTemplateManager_Interpolate_Destination(t *testing.T) {
t.Parallel()
// Make a template that will have its destination interpolated
content := "hello, world!"
file := "${node.unique.id}.tmpl"
@@ -865,6 +878,7 @@ func TestTaskTemplateManager_Interpolate_Destination(t *testing.T) {
}
func TestTaskTemplateManager_Signal_Error(t *testing.T) {
t.Parallel()
// Make a template that renders based on a key in Consul and sends SIGALRM
key1 := "foo"
content1 := "bar"
@@ -916,6 +930,7 @@ func TestTaskTemplateManager_Signal_Error(t *testing.T) {
// TestTaskTemplateManager_Env asserts templates with the env flag set are read
// into the task's environment.
func TestTaskTemplateManager_Env(t *testing.T) {
t.Parallel()
template := &structs.Template{
EmbeddedTmpl: `
# Comment lines are ok
@@ -958,6 +973,7 @@ ANYTHING_goes=Spaces are=ok!
// TestTaskTemplateManager_Env_Missing asserts the core env
// template processing function returns errors when files don't exist
func TestTaskTemplateManager_Env_Missing(t *testing.T) {
t.Parallel()
d, err := ioutil.TempDir("", "ct_env_missing")
if err != nil {
t.Fatalf("err: %v", err)
@@ -992,6 +1008,7 @@ func TestTaskTemplateManager_Env_Missing(t *testing.T) {
// template processing function returns combined env vars from multiple
// templates correctly.
func TestTaskTemplateManager_Env_Multi(t *testing.T) {
t.Parallel()
d, err := ioutil.TempDir("", "ct_env_missing")
if err != nil {
t.Fatalf("err: %v", err)
@@ -1038,6 +1055,7 @@ func TestTaskTemplateManager_Env_Multi(t *testing.T) {
// TestTaskTemplateManager_Config_ServerName asserts the tls_server_name
// setting is propagated to consul-template's configuration. See #2776
func TestTaskTemplateManager_Config_ServerName(t *testing.T) {
t.Parallel()
c := config.DefaultConfig()
c.VaultConfig = &sconfig.VaultConfig{
Enabled: helper.BoolToPtr(true),

View File

@@ -70,7 +70,7 @@ func TestDockerCoordinator_ConcurrentPulls(t *testing.T) {
testutil.WaitForResult(func() (bool, error) {
p := mock.pulled[image]
if p != 1 {
if p >= 10 {
return false, fmt.Errorf("Wrong number of pulls: %d", p)
}
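For context: the old assertion demanded exactly one pull (p != 1), which is timing dependent under concurrency; the new bound only fails the poll when the count is wildly off. The check runs inside nomad's testutil.WaitForResult polling helper, roughly as follows (the success conditions after the bound are elided from the hunk and assumed here):

testutil.WaitForResult(func() (bool, error) {
	p := mock.pulled[image]
	if p >= 10 {
		return false, fmt.Errorf("Wrong number of pulls: %d", p)
	}
	// ... remaining success conditions from the test body ...
	return true, nil
}, func(err error) {
	t.Fatalf("err: %v", err)
})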

View File

@@ -42,8 +42,8 @@ func dockerIsRemote(t *testing.T) bool {
// Ports used by tests
var (
docker_reserved = 32768 + int(rand.Int31n(25000))
docker_dynamic = 32768 + int(rand.Int31n(25000))
docker_reserved = 2000 + int(rand.Int31n(10000))
docker_dynamic = 2000 + int(rand.Int31n(10000))
)
// Returns a task with a reserved and dynamic port. The ports are returned
@@ -158,7 +158,9 @@ func newTestDockerClient(t *testing.T) *docker.Client {
// This test should always pass, even if the docker daemon is not available
func TestDockerDriver_Fingerprint(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
ctx := testDockerDriverContexts(t, &structs.Task{Name: "foo", Driver: "docker", Resources: basicResources})
//ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer ctx.AllocDir.Destroy()
@@ -182,7 +184,9 @@ func TestDockerDriver_Fingerprint(t *testing.T) {
// TestDockerDriver_Fingerprint_Bridge asserts that if Docker is running we set
// the bridge network's IP as a node attribute. See #2785
func TestDockerDriver_Fingerprint_Bridge(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
if !testutil.DockerIsConnected(t) {
t.Skip("requires Docker")
}
@@ -215,7 +219,9 @@ func TestDockerDriver_Fingerprint_Bridge(t *testing.T) {
}
func TestDockerDriver_StartOpen_Wait(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
if !testutil.DockerIsConnected(t) {
t.SkipNow()
}
@@ -267,7 +273,9 @@ func TestDockerDriver_StartOpen_Wait(t *testing.T) {
}
func TestDockerDriver_Start_Wait(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
task := &structs.Task{
Name: "nc-demo",
Driver: "docker",
@@ -307,7 +315,9 @@ func TestDockerDriver_Start_Wait(t *testing.T) {
}
func TestDockerDriver_Start_LoadImage(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
if !testutil.DockerIsConnected(t) {
t.SkipNow()
}
@@ -374,7 +384,9 @@ func TestDockerDriver_Start_LoadImage(t *testing.T) {
}
func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
if !testutil.DockerIsConnected(t) {
t.SkipNow()
}
@@ -416,7 +428,9 @@ func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
}
func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
// This test requires that the alloc dir be mounted into docker as a volume.
// Because this cannot happen when docker is run remotely, e.g. when running
// docker in a VM, we skip this when we detect Docker is being run remotely.
@@ -487,7 +501,9 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
}
func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
task := &structs.Task{
Name: "nc-demo",
Driver: "docker",
@@ -526,7 +542,9 @@ func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
}
func TestDockerDriver_StartN(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
if !testutil.DockerIsConnected(t) {
t.SkipNow()
}
@@ -578,7 +596,9 @@ func TestDockerDriver_StartN(t *testing.T) {
}
func TestDockerDriver_StartNVersions(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
if !testutil.DockerIsConnected(t) {
t.SkipNow()
}
@@ -656,7 +676,9 @@ func waitForExist(t *testing.T, client *docker.Client, handle *DockerHandle) {
}
func TestDockerDriver_NetworkMode_Host(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
expected := "host"
task := &structs.Task{
@@ -696,7 +718,9 @@ func TestDockerDriver_NetworkMode_Host(t *testing.T) {
}
func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
// Because go-dockerclient doesn't provide api for query network aliases, just check that
// a container can be created with a 'network_aliases' property
@@ -743,7 +767,9 @@ func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) {
}
func TestDockerDriver_Labels(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
task, _, _ := dockerTask()
task.Config["labels"] = []map[string]string{
map[string]string{
@@ -772,7 +798,9 @@ func TestDockerDriver_Labels(t *testing.T) {
}
func TestDockerDriver_ForcePull_IsInvalidConfig(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
task, _, _ := dockerTask()
task.Config["force_pull"] = "nothing"
@@ -787,7 +815,9 @@ func TestDockerDriver_ForcePull_IsInvalidConfig(t *testing.T) {
}
func TestDockerDriver_ForcePull(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
task, _, _ := dockerTask()
task.Config["force_pull"] = "true"
@@ -803,7 +833,9 @@ func TestDockerDriver_ForcePull(t *testing.T) {
}
func TestDockerDriver_SecurityOpt(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
task, _, _ := dockerTask()
task.Config["security_opt"] = []string{"seccomp=unconfined"}
@@ -823,7 +855,9 @@ func TestDockerDriver_SecurityOpt(t *testing.T) {
}
func TestDockerDriver_DNS(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
task, _, _ := dockerTask()
task.Config["dns_servers"] = []string{"8.8.8.8", "8.8.4.4"}
task.Config["dns_search_domains"] = []string{"example.com", "example.org", "example.net"}
@@ -848,7 +882,9 @@ func TestDockerDriver_DNS(t *testing.T) {
}
func TestDockerDriver_MACAddress(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
task, _, _ := dockerTask()
task.Config["mac_address"] = "00:16:3e:00:00:00"
@@ -868,7 +904,9 @@ func TestDockerDriver_MACAddress(t *testing.T) {
}
func TestDockerWorkDir(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
task, _, _ := dockerTask()
task.Config["work_dir"] = "/some/path"
@@ -895,7 +933,9 @@ func inSlice(needle string, haystack []string) bool {
}
func TestDockerDriver_PortsNoMap(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
task, res, dyn := dockerTask()
client, handle, cleanup := dockerSetup(t, task)
@@ -946,7 +986,9 @@ func TestDockerDriver_PortsNoMap(t *testing.T) {
}
func TestDockerDriver_PortsMapping(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
task, res, dyn := dockerTask()
task.Config["port_map"] = []map[string]string{
map[string]string{
@@ -992,7 +1034,7 @@ func TestDockerDriver_PortsMapping(t *testing.T) {
expectedEnvironment := map[string]string{
"NOMAD_PORT_main": "8080",
"NOMAD_PORT_REDIS": "6379",
"NOMAD_HOST_PORT_main": strconv.Itoa(docker_reserved),
"NOMAD_HOST_PORT_main": strconv.Itoa(res),
}
sort.Strings(container.Config.Env)
@@ -1005,7 +1047,9 @@ func TestDockerDriver_PortsMapping(t *testing.T) {
}
func TestDockerDriver_User(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
task := &structs.Task{
Name: "redis-demo",
User: "alice",
@@ -1055,7 +1099,9 @@ func TestDockerDriver_User(t *testing.T) {
}
func TestDockerDriver_CleanupContainer(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
task := &structs.Task{
Name: "redis-demo",
Driver: "docker",
@@ -1104,7 +1150,9 @@ func TestDockerDriver_CleanupContainer(t *testing.T) {
}
func TestDockerDriver_Stats(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
task := &structs.Task{
Name: "sleep",
Driver: "docker",
@@ -1213,7 +1261,9 @@ func setupDockerVolumes(t *testing.T, cfg *config.Config, hostpath string) (*str
}
func TestDockerDriver_VolumesDisabled(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
cfg := testConfig()
cfg.Options = map[string]string{
dockerVolumesConfigOption: "false",
@@ -1284,7 +1334,9 @@ func TestDockerDriver_VolumesDisabled(t *testing.T) {
}
func TestDockerDriver_VolumesEnabled(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
cfg := testConfig()
tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumesenabled")
@@ -1321,7 +1373,9 @@ func TestDockerDriver_VolumesEnabled(t *testing.T) {
// TestDockerDriver_Cleanup ensures Cleanup removes only downloaded images.
func TestDockerDriver_Cleanup(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
if !testutil.DockerIsConnected(t) {
t.SkipNow()
}
@@ -1383,7 +1437,9 @@ func copyImage(t *testing.T, taskDir *allocdir.TaskDir, image string) {
}
func TestDockerDriver_AuthConfiguration(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
path := "./test-resources/docker/auth.json"
cases := []struct {
Repo string

View File

@@ -16,7 +16,9 @@ import (
)
func TestDockerDriver_Signal(t *testing.T) {
t.Parallel()
if !tu.IsTravis() {
t.Parallel()
}
if !testutil.DockerIsConnected(t) {
t.SkipNow()
}

View File

@@ -48,7 +48,9 @@ func TestExecDriver_Fingerprint(t *testing.T) {
}
func TestExecDriver_StartOpen_Wait(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
ctestutils.ExecCompatible(t)
task := &structs.Task{
Name: "sleep",
@@ -90,7 +92,9 @@ func TestExecDriver_StartOpen_Wait(t *testing.T) {
}
func TestExecDriver_Start_Wait(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
ctestutils.ExecCompatible(t)
task := &structs.Task{
Name: "sleep",
@@ -136,7 +140,9 @@ func TestExecDriver_Start_Wait(t *testing.T) {
}
func TestExecDriver_Start_Wait_AllocDir(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
ctestutils.ExecCompatible(t)
exp := []byte{'w', 'i', 'n'}
@@ -193,7 +199,9 @@ func TestExecDriver_Start_Wait_AllocDir(t *testing.T) {
}
func TestExecDriver_Start_Kill_Wait(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
ctestutils.ExecCompatible(t)
task := &structs.Task{
Name: "sleep",
@@ -242,7 +250,9 @@ func TestExecDriver_Start_Kill_Wait(t *testing.T) {
}
func TestExecDriverUser(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
ctestutils.ExecCompatible(t)
task := &structs.Task{
Name: "sleep",
@@ -281,7 +291,9 @@ func TestExecDriverUser(t *testing.T) {
// TestExecDriver_HandlerExec ensures the exec driver's handle properly
// executes commands inside the container.
func TestExecDriver_HandlerExec(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
ctestutils.ExecCompatible(t)
task := &structs.Task{
Name: "sleep",

View File

@@ -19,7 +19,9 @@ import (
)
func TestExecDriver_KillUserPid_OnPluginReconnectFailure(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
ctestutils.ExecCompatible(t)
task := &structs.Task{
Name: "sleep",
@@ -89,7 +91,9 @@ func TestExecDriver_KillUserPid_OnPluginReconnectFailure(t *testing.T) {
}
func TestExecDriver_Signal(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
ctestutils.ExecCompatible(t)
task := &structs.Task{
Name: "signal",

View File

@@ -68,7 +68,9 @@ func TestJavaDriver_Fingerprint(t *testing.T) {
}
func TestJavaDriver_StartOpen_Wait(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
if !javaLocated() {
t.Skip("Java not found; skipping")
}
@@ -122,7 +124,9 @@ func TestJavaDriver_StartOpen_Wait(t *testing.T) {
}
func TestJavaDriver_Start_Wait(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
if !javaLocated() {
t.Skip("Java not found; skipping")
}
@@ -133,6 +137,7 @@ func TestJavaDriver_Start_Wait(t *testing.T) {
Driver: "java",
Config: map[string]interface{}{
"jar_path": "demoapp.jar",
"args": []string{"1"},
},
LogConfig: &structs.LogConfig{
MaxFiles: 10,
@@ -157,15 +162,14 @@ func TestJavaDriver_Start_Wait(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Task should terminate quickly
// Task should terminate after 1 second
select {
case res := <-resp.Handle.WaitCh():
if !res.Successful() {
t.Fatalf("err: %v", res)
t.Fatal("err: %v", res.String())
}
case <-time.After(time.Duration(testutil.TestMultiplier()*5) * time.Second):
// expect the timeout b/c it's a long lived process
break
case <-time.After(5 * time.Second):
t.Fatalf("timeout")
}
// Get the stdout of the process and assert that it's not empty
@@ -186,7 +190,9 @@ func TestJavaDriver_Start_Wait(t *testing.T) {
}
func TestJavaDriver_Start_Kill_Wait(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
if !javaLocated() {
t.Skip("Java not found; skipping")
}
@@ -246,7 +252,9 @@ func TestJavaDriver_Start_Kill_Wait(t *testing.T) {
}
func TestJavaDriver_Signal(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
if !javaLocated() {
t.Skip("Java not found; skipping")
}
@@ -305,8 +313,10 @@ func TestJavaDriver_Signal(t *testing.T) {
}
}
func TestJavaDriverUser(t *testing.T) {
t.Parallel()
func TestJavaDriver_User(t *testing.T) {
if !testutil.IsTravis() {
t.Parallel()
}
if !javaLocated() {
t.Skip("Java not found; skipping")
}
@@ -345,7 +355,9 @@ func TestJavaDriverUser(t *testing.T) {
}
func TestJavaDriver_Start_Wait_Class(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
if !javaLocated() {
t.Skip("Java not found; skipping")
}
@@ -357,6 +369,7 @@ func TestJavaDriver_Start_Wait_Class(t *testing.T) {
Config: map[string]interface{}{
"class_path": "${NOMAD_TASK_DIR}",
"class": "Hello",
"args": []string{"1"},
},
LogConfig: &structs.LogConfig{
MaxFiles: 10,
@@ -381,15 +394,14 @@ func TestJavaDriver_Start_Wait_Class(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Task should terminate quickly
// Task should terminate after 1 second
select {
case res := <-resp.Handle.WaitCh():
if !res.Successful() {
t.Fatalf("err: %v", res)
t.Fatal("err: %v", res.String())
}
case <-time.After(time.Duration(testutil.TestMultiplier()*5) * time.Second):
// expect the timeout b/c it's a long lived process
break
case <-time.After(5 * time.Second):
t.Fatalf("timeout")
}
// Get the stdout of the process and assert that it's not empty

View File

@@ -55,7 +55,9 @@ func TestLxcDriver_Fingerprint(t *testing.T) {
}
func TestLxcDriver_Start_Wait(t *testing.T) {
t.Parallel()
if testutil.IsTravis() {
t.Parallel()
}
if !lxcPresent(t) {
t.Skip("lxc not present")
}
@@ -129,7 +131,9 @@ func TestLxcDriver_Start_Wait(t *testing.T) {
}
func TestLxcDriver_Open_Wait(t *testing.T) {
t.Parallel()
if testutil.IsTravis() {
t.Parallel()
}
if !lxcPresent(t) {
t.Skip("lxc not present")
}

View File

@@ -200,7 +200,7 @@ func (h *mockDriverHandle) ID() string {
TaskName: h.taskName,
RunFor: h.runFor,
KillAfter: h.killAfter,
KillTimeout: h.killAfter,
KillTimeout: h.killTimeout,
ExitCode: h.exitCode,
ExitSignal: h.exitSignal,
ExitErr: h.exitErr,
@@ -250,6 +250,7 @@ func (h *mockDriverHandle) Exec(ctx context.Context, cmd string, args []string)
// TODO Implement when we need it.
func (h *mockDriverHandle) Update(task *structs.Task) error {
h.killTimeout = task.KillTimeout
return nil
}

View File

@@ -9,13 +9,16 @@ import (
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
ctestutils "github.com/hashicorp/nomad/client/testutil"
)
// The fingerprinter test should always pass, even if QEMU is not installed.
func TestQemuDriver_Fingerprint(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
ctestutils.QemuCompatible(t)
task := &structs.Task{
Name: "foo",
@@ -45,7 +48,9 @@ func TestQemuDriver_Fingerprint(t *testing.T) {
}
func TestQemuDriver_StartOpen_Wait(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
ctestutils.QemuCompatible(t)
task := &structs.Task{
Name: "linux",
@@ -112,7 +117,9 @@ func TestQemuDriver_StartOpen_Wait(t *testing.T) {
}
func TestQemuDriverUser(t *testing.T) {
t.Parallel()
if !testutil.IsTravis() {
t.Parallel()
}
ctestutils.QemuCompatible(t)
task := &structs.Task{
Name: "linux",

View File

@@ -56,7 +56,7 @@ const (
rktCmd = "rkt"
// rktUuidDeadline is how long to wait for the uuid file to be written
rktUuidDeadline = 5 * time.Second
rktUuidDeadline = 15 * time.Second
)
// RktDriver is a driver for running images via Rkt

View File

@@ -72,7 +72,9 @@ func TestRktDriver_Fingerprint(t *testing.T) {
}
func TestRktDriver_Start_DNS(t *testing.T) {
t.Parallel()
if testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
@@ -124,7 +126,9 @@ func TestRktDriver_Start_DNS(t *testing.T) {
}
func TestRktDriver_Start_Wait(t *testing.T) {
t.Parallel()
if testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
@@ -184,7 +188,9 @@ func TestRktDriver_Start_Wait(t *testing.T) {
}
func TestRktDriver_Start_Wait_Skip_Trust(t *testing.T) {
t.Parallel()
if testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
@@ -238,7 +244,9 @@ func TestRktDriver_Start_Wait_Skip_Trust(t *testing.T) {
}
func TestRktDriver_Start_Wait_AllocDir(t *testing.T) {
t.Parallel()
if testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
@@ -311,7 +319,9 @@ func TestRktDriver_Start_Wait_AllocDir(t *testing.T) {
}
func TestRktDriverUser(t *testing.T) {
t.Parallel()
if testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
@@ -356,7 +366,9 @@ func TestRktDriverUser(t *testing.T) {
}
func TestRktTrustPrefix(t *testing.T) {
t.Parallel()
if testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
@@ -424,7 +436,9 @@ func TestRktTaskValidate(t *testing.T) {
// TODO: Port Mapping test should be run with a proper ACI image and test the port access.
func TestRktDriver_PortsMapping(t *testing.T) {
t.Parallel()
if testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
@@ -489,7 +503,9 @@ func TestRktDriver_PortsMapping(t *testing.T) {
}
func TestRktDriver_HandlerExec(t *testing.T) {
t.Parallel()
if testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}

View File

@@ -1,12 +1,14 @@
public class Hello {
public static void main(String[] args) {
while (true) {
System.out.println("Hello");
try {
Thread.sleep(1000); //1000 milliseconds is one second.
} catch(InterruptedException ex) {
Thread.currentThread().interrupt();
}
}
System.out.println("Hello");
int seconds = 5;
if (args.length != 0) {
seconds = Integer.parseInt(args[0]);
}
try {
Thread.sleep(1000*seconds); //1000 milliseconds is one second.
} catch(InterruptedException ex) {
Thread.currentThread().interrupt();
}
}
}

View File

@@ -20,6 +20,7 @@ func gcConfig() *GCConfig {
}
func TestIndexedGCAllocPQ(t *testing.T) {
t.Parallel()
pq := NewIndexedGCAllocPQ()
_, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false)
@@ -100,6 +101,7 @@ func (m *MockStatsCollector) Stats() *stats.HostStats {
}
func TestAllocGarbageCollector_MarkForCollection(t *testing.T) {
t.Parallel()
logger := testLogger()
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
@@ -115,6 +117,7 @@ func TestAllocGarbageCollector_MarkForCollection(t *testing.T) {
}
func TestAllocGarbageCollector_Collect(t *testing.T) {
t.Parallel()
logger := testLogger()
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
@@ -141,6 +144,7 @@ func TestAllocGarbageCollector_Collect(t *testing.T) {
}
func TestAllocGarbageCollector_CollectAll(t *testing.T) {
t.Parallel()
logger := testLogger()
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
@@ -163,6 +167,7 @@ func TestAllocGarbageCollector_CollectAll(t *testing.T) {
}
func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T) {
t.Parallel()
logger := testLogger()
statsCollector := &MockStatsCollector{}
conf := gcConfig()
@@ -201,6 +206,7 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T)
}
func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) {
t.Parallel()
logger := testLogger()
statsCollector := &MockStatsCollector{}
conf := gcConfig()
@@ -240,6 +246,7 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) {
}
func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) {
t.Parallel()
logger := testLogger()
statsCollector := &MockStatsCollector{}
conf := gcConfig()
@@ -275,6 +282,7 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) {
}
func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T) {
t.Parallel()
logger := testLogger()
statsCollector := &MockStatsCollector{}
conf := gcConfig()
@@ -309,6 +317,7 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T)
}
func TestAllocGarbageCollector_MakeRoomForAllocations_MaxAllocs(t *testing.T) {
t.Parallel()
const (
liveAllocs = 3
maxAllocs = 6
@@ -346,6 +355,7 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_MaxAllocs(t *testing.T) {
}
func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) {
t.Parallel()
logger := testLogger()
statsCollector := &MockStatsCollector{}
conf := gcConfig()
@@ -381,6 +391,7 @@ func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) {
}
func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) {
t.Parallel()
logger := testLogger()
statsCollector := &MockStatsCollector{}
conf := gcConfig()
@@ -418,6 +429,7 @@ func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) {
}
func TestAllocGarbageCollector_MaxAllocsThreshold(t *testing.T) {
t.Parallel()
const (
liveAllocs = 3
maxAllocs = 6

View File

@@ -8,6 +8,7 @@ import (
)
func TestServerList(t *testing.T) {
t.Parallel()
s := newServerList()
// New lists should be empty
@@ -89,6 +90,7 @@ func TestServerList(t *testing.T) {
// TestClient_ServerList tests client methods that interact with the internal
// nomad server list.
func TestClient_ServerList(t *testing.T) {
t.Parallel()
// manually create a mostly empty client to avoid spinning up a ton of
// goroutines that complicate testing
client := Client{servers: newServerList(), logger: log.New(os.Stderr, "", log.Ltime|log.Lshortfile)}

View File

@@ -22,8 +22,6 @@ import (
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
ctestutil "github.com/hashicorp/nomad/client/testutil"
)
func testLogger() *log.Logger {
@@ -68,7 +66,12 @@ func (ctx *taskRunnerTestCtx) Cleanup() {
}
func testTaskRunner(t *testing.T, restarts bool) *taskRunnerTestCtx {
return testTaskRunnerFromAlloc(t, restarts, mock.Alloc())
// Use mock driver
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
task.Config["run_for"] = "500ms"
return testTaskRunnerFromAlloc(t, restarts, alloc)
}
// Creates a mock task runner using the first task in the first task group of
@@ -160,7 +163,7 @@ func testWaitForTaskToStart(t *testing.T, ctx *taskRunnerTestCtx) {
}
func TestTaskRunner_SimpleRun(t *testing.T) {
ctestutil.ExecCompatible(t)
t.Parallel()
ctx := testTaskRunner(t, false)
ctx.tr.MarkReceived()
go ctx.tr.Run()
@@ -198,6 +201,7 @@ func TestTaskRunner_SimpleRun(t *testing.T) {
}
func TestTaskRunner_Run_RecoverableStartError(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -240,27 +244,22 @@ func TestTaskRunner_Run_RecoverableStartError(t *testing.T) {
}
func TestTaskRunner_Destroy(t *testing.T) {
ctestutil.ExecCompatible(t)
ctx := testTaskRunner(t, true)
ctx.tr.MarkReceived()
//FIXME This didn't used to send a kill status update!!!???
defer ctx.Cleanup()
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
task.Config = map[string]interface{}{
"run_for": "1000s",
}
// Change command to ensure we run for a bit
ctx.tr.task.Config["command"] = "/bin/sleep"
ctx.tr.task.Config["args"] = []string{"1000"}
ctx := testTaskRunnerFromAlloc(t, true, alloc)
ctx.tr.MarkReceived()
go ctx.tr.Run()
defer ctx.Cleanup()
// Wait for the task to start
testWaitForTaskToStart(t, ctx)
// Make sure we are collecting a few stats
time.Sleep(2 * time.Second)
stats := ctx.tr.LatestResourceUsage()
if len(stats.Pids) == 0 || stats.ResourceUsage == nil || stats.ResourceUsage.MemoryStats.RSS == 0 {
t.Fatalf("expected task runner to have some stats")
}
// Begin the tear down
ctx.tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
@@ -288,13 +287,17 @@ func TestTaskRunner_Destroy(t *testing.T) {
}
func TestTaskRunner_Update(t *testing.T) {
ctestutil.ExecCompatible(t)
ctx := testTaskRunner(t, false)
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Services[0].Checks[0].Args[0] = "${NOMAD_META_foo}"
task.Driver = "mock_driver"
task.Config = map[string]interface{}{
"run_for": "100s",
}
// Change command to ensure we run for a bit
ctx.tr.task.Config["command"] = "/bin/sleep"
ctx.tr.task.Config["args"] = []string{"100"}
ctx.tr.task.Services[0].Checks[0].Args[0] = "${NOMAD_META_foo}"
ctx := testTaskRunnerFromAlloc(t, true, alloc)
ctx.tr.MarkReceived()
go ctx.tr.Run()
defer ctx.Cleanup()
@@ -314,18 +317,9 @@ func TestTaskRunner_Update(t *testing.T) {
newTask.Meta["foo"] = "UPDATE"
// Update the kill timeout
testutil.WaitForResult(func() (bool, error) {
if ctx.tr.handle == nil {
return false, fmt.Errorf("task not started")
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
testWaitForTaskToStart(t, ctx)
oldHandle := ctx.tr.handle.ID()
newTask.KillTimeout = time.Hour
ctx.tr.Update(updateAlloc)
// Wait for ctx.update to take place
@@ -364,6 +358,7 @@ func TestTaskRunner_Update(t *testing.T) {
}
func TestTaskRunner_SaveRestoreState(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -378,7 +373,6 @@ func TestTaskRunner_SaveRestoreState(t *testing.T) {
ctx := testTaskRunnerFromAlloc(t, false, alloc)
ctx.tr.MarkReceived()
go ctx.tr.Run()
//FIXME This test didn't used to defer destroy the allocidr ???!!!
defer ctx.Cleanup()
// Wait for the task to be running and then snapshot the state
@@ -424,14 +418,18 @@ func TestTaskRunner_SaveRestoreState(t *testing.T) {
}
func TestTaskRunner_Download_List(t *testing.T) {
ctestutil.ExecCompatible(t)
t.Parallel()
ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Dir("."))))
defer ts.Close()
// Create an allocation that has a task with a list of artifacts.
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
task.Config = map[string]interface{}{
"exit_code": "0",
"run_for": "10s",
}
f1 := "task_runner_test.go"
f2 := "task_runner.go"
artifact1 := structs.TaskArtifact{
@@ -491,11 +489,15 @@ func TestTaskRunner_Download_List(t *testing.T) {
}
func TestTaskRunner_Download_Retries(t *testing.T) {
ctestutil.ExecCompatible(t)
t.Parallel()
// Create an allocation that has a task with bad artifacts.
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
task.Config = map[string]interface{}{
"exit_code": "0",
"run_for": "10s",
}
artifact := structs.TaskArtifact{
GetterSource: "http://127.1.1.111:12315/foo/bar/baz",
}
@@ -564,8 +566,7 @@ func TestTaskRunner_Download_Retries(t *testing.T) {
// TestTaskRunner_UnregisterConsul_Retries asserts a task is unregistered from
// Consul when waiting to be retried.
func TestTaskRunner_UnregisterConsul_Retries(t *testing.T) {
ctestutil.ExecCompatible(t)
t.Parallel()
// Create an allocation that has a task with bad artifacts.
alloc := mock.Alloc()
@@ -609,7 +610,7 @@ func TestTaskRunner_UnregisterConsul_Retries(t *testing.T) {
}
func TestTaskRunner_Validate_UserEnforcement(t *testing.T) {
ctestutil.ExecCompatible(t)
t.Parallel()
ctx := testTaskRunner(t, false)
defer ctx.Cleanup()
@@ -636,6 +637,7 @@ func TestTaskRunner_Validate_UserEnforcement(t *testing.T) {
}
func TestTaskRunner_RestartTask(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -659,7 +661,7 @@ func TestTaskRunner_RestartTask(t *testing.T) {
// Wait for the task to start again
testutil.WaitForResult(func() (bool, error) {
if len(ctx.upd.events) != 8 {
t.Fatalf("task %q in alloc %q should have 8 ctx.updates: %#v", task.Name, alloc.ID, ctx.upd.events)
return false, fmt.Errorf("task %q in alloc %q should have 8 ctx.updates: %#v", task.Name, alloc.ID, ctx.upd.events)
}
return true, nil
@@ -677,7 +679,7 @@ func TestTaskRunner_RestartTask(t *testing.T) {
}
if len(ctx.upd.events) != 10 {
t.Fatalf("should have 9 ctx.updates: %#v", ctx.upd.events)
t.Fatalf("should have 10 ctx.updates: %#v", ctx.upd.events)
}
if ctx.upd.state != structs.TaskStateDead {
@@ -725,6 +727,7 @@ func TestTaskRunner_RestartTask(t *testing.T) {
}
func TestTaskRunner_KillTask(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -783,6 +786,7 @@ func TestTaskRunner_KillTask(t *testing.T) {
}
func TestTaskRunner_SignalFailure(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -806,6 +810,7 @@ func TestTaskRunner_SignalFailure(t *testing.T) {
}
func TestTaskRunner_BlockForVault(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -913,6 +918,7 @@ func TestTaskRunner_BlockForVault(t *testing.T) {
}
func TestTaskRunner_DeriveToken_Retry(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -998,6 +1004,7 @@ func TestTaskRunner_DeriveToken_Retry(t *testing.T) {
}
func TestTaskRunner_DeriveToken_Unrecoverable(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -1044,6 +1051,7 @@ func TestTaskRunner_DeriveToken_Unrecoverable(t *testing.T) {
}
func TestTaskRunner_Template_Block(t *testing.T) {
t.Parallel()
testRetryRate = 2 * time.Second
defer func() {
testRetryRate = 0
@@ -1125,6 +1133,7 @@ func TestTaskRunner_Template_Block(t *testing.T) {
}
func TestTaskRunner_Template_Artifact(t *testing.T) {
t.Parallel()
dir, err := os.Getwd()
if err != nil {
t.Fatalf("bad: %v", err)
@@ -1204,6 +1213,7 @@ func TestTaskRunner_Template_Artifact(t *testing.T) {
}
func TestTaskRunner_Template_NewVaultToken(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -1282,6 +1292,7 @@ func TestTaskRunner_Template_NewVaultToken(t *testing.T) {
}
func TestTaskRunner_VaultManager_Restart(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -1357,6 +1368,7 @@ func TestTaskRunner_VaultManager_Restart(t *testing.T) {
}
func TestTaskRunner_VaultManager_Signal(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -1418,6 +1430,7 @@ func TestTaskRunner_VaultManager_Signal(t *testing.T) {
// Test that the payload is written to disk
func TestTaskRunner_SimpleRun_Dispatch(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -1486,6 +1499,7 @@ func TestTaskRunner_SimpleRun_Dispatch(t *testing.T) {
// TestTaskRunner_CleanupEmpty ensures TaskRunner works when createdResources
// is empty.
func TestTaskRunner_CleanupEmpty(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -1503,6 +1517,7 @@ func TestTaskRunner_CleanupEmpty(t *testing.T) {
}
func TestTaskRunner_CleanupOK(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -1528,6 +1543,7 @@ func TestTaskRunner_CleanupOK(t *testing.T) {
}
func TestTaskRunner_CleanupFail(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
@@ -1553,6 +1569,7 @@ func TestTaskRunner_CleanupFail(t *testing.T) {
}
func TestTaskRunner_Pre06ScriptCheck(t *testing.T) {
t.Parallel()
run := func(ver, driver, checkType string, exp bool) (string, func(t *testing.T)) {
name := fmt.Sprintf("%s %s %s returns %t", ver, driver, checkType, exp)
return name, func(t *testing.T) {

View File

@@ -31,6 +31,7 @@ func testLogger() *log.Logger {
// TestConsul_Integration asserts TaskRunner properly registers and deregisters
// services and checks with Consul using an embedded Consul agent.
func TestConsul_Integration(t *testing.T) {
if testing.Short() {
	t.Skip("-short set; skipping")
}
if _, ok := driver.BuiltinDrivers["mock_driver"]; !ok {
t.Skip(`test requires mock_driver; run with "-tags nomad_test"`)
}

View File

@@ -7,7 +7,7 @@ PING_LOOP_PID=$!
trap "kill $PING_LOOP_PID" EXIT HUP INT QUIT TERM
GOTEST_FLAGS="-parallel=4" make test
GOTEST_FLAGS="-parallel=2" make test
TEST_OUTPUT=$?
kill $PING_LOOP_PID

View File

@@ -2,11 +2,12 @@ package testutil
import (
"fmt"
"math/rand"
"os"
"os/exec"
"runtime"
"sync"
"testing"
"time"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config"
@@ -19,22 +20,12 @@ import (
// and offers an easy API to tear itself down on test end. The only
// prerequisite is that the Vault binary is on the $PATH.
const (
// vaultStartPort is the starting port we use to bind Vault servers to
vaultStartPort uint64 = 40000
)
var (
// vaultPortOffset is used to atomically increment the port numbers.
vaultPortOffset uint64
vaultPortLock sync.Mutex
)
// TestVault wraps a test Vault server launched in dev mode, suitable for
// testing.
type TestVault struct {
cmd *exec.Cmd
t *testing.T
cmd *exec.Cmd
t *testing.T
waitCh chan error
Addr string
HTTPAddr string
@@ -95,6 +86,20 @@ func (tv *TestVault) Start() *TestVault {
tv.t.Fatalf("failed to start vault: %v", err)
}
// Start the waiter
tv.waitCh = make(chan error, 1)
go func() {
err := tv.cmd.Wait()
tv.waitCh <- err
}()
// Ensure Vault started
select {
case err := <-tv.waitCh:
tv.t.Fatal(err.Error())
case <-time.After(time.Duration(500*TestMultiplier()) * time.Millisecond):
}
tv.waitForAPI()
return tv
}
@@ -108,7 +113,9 @@ func (tv *TestVault) Stop() {
if err := tv.cmd.Process.Kill(); err != nil {
tv.t.Errorf("err: %s", err)
}
tv.cmd.Wait()
if tv.waitCh != nil {
<-tv.waitCh
}
}
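Start now spawns a waiter goroutine so the test fails fast if the Vault child process exits during startup, and Stop drains the same channel rather than calling cmd.Wait a second time. The pattern in isolation (assuming cmd is a started *exec.Cmd and t a *testing.T; the TestMultiplier scaling of the timeout is omitted):

// Wait for the process on a buffered channel so both the startup check
// and Stop can observe its exit without racing on cmd.Wait.
waitCh := make(chan error, 1)
go func() {
	waitCh <- cmd.Wait()
}()

select {
case err := <-waitCh:
	t.Fatal(err.Error()) // process died before it was ready
case <-time.After(500 * time.Millisecond):
	// Still running; proceed to poll the HTTP API.
}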
// waitForAPI waits for the Vault HTTP endpoint to start
@@ -126,13 +133,8 @@ func (tv *TestVault) waitForAPI() {
})
}
// getPort returns the next available port to bind Vault against
func getPort() uint64 {
vaultPortLock.Lock()
defer vaultPortLock.Unlock()
p := vaultStartPort + vaultPortOffset
vaultPortOffset += 1
return p
func getPort() int {
return 1030 + int(rand.Int31n(6440))
}
// VaultVersion returns the Vault version as a string or an error if it couldn't