diff --git a/client/driver/driver_test.go b/client/driver/driver_test.go
index 96c490133..b8b1c2889 100644
--- a/client/driver/driver_test.go
+++ b/client/driver/driver_test.go
@@ -20,6 +20,7 @@ import (
 var basicResources = &structs.Resources{
 	CPU:      250,
 	MemoryMB: 256,
+	DiskMB:   20,
 	Networks: []*structs.NetworkResource{
 		&structs.NetworkResource{
 			IP: "0.0.0.0",
diff --git a/client/driver/exec_test.go b/client/driver/exec_test.go
index 2f4a1a220..55e4f7b9d 100644
--- a/client/driver/exec_test.go
+++ b/client/driver/exec_test.go
@@ -22,7 +22,11 @@ import (
 func TestExecDriver_Fingerprint(t *testing.T) {
 	ctestutils.ExecCompatible(t)
-	driverCtx, _ := testDriverContexts(&structs.Task{Name: "foo"})
+	task := &structs.Task{
+		Name:      "foo",
+		Resources: structs.DefaultResources(),
+	}
+	driverCtx, _ := testDriverContexts(task)
 	d := NewExecDriver(driverCtx)
 	node := &structs.Node{
 		Attributes: map[string]string{
diff --git a/client/driver/java_test.go b/client/driver/java_test.go
index 1f8541776..553bb727f 100644
--- a/client/driver/java_test.go
+++ b/client/driver/java_test.go
@@ -31,7 +31,11 @@ func javaLocated() bool {
 // The fingerprinter test should always pass, even if Java is not installed.
 func TestJavaDriver_Fingerprint(t *testing.T) {
 	ctestutils.JavaCompatible(t)
-	driverCtx, _ := testDriverContexts(&structs.Task{Name: "foo"})
+	task := &structs.Task{
+		Name:      "foo",
+		Resources: structs.DefaultResources(),
+	}
+	driverCtx, _ := testDriverContexts(task)
 	d := NewJavaDriver(driverCtx)
 	node := &structs.Node{
 		Attributes: map[string]string{
diff --git a/client/task_runner_test.go b/client/task_runner_test.go
index aeb3dfcc6..0fc7316c2 100644
--- a/client/task_runner_test.go
+++ b/client/task_runner_test.go
@@ -71,7 +71,7 @@ func TestTaskRunner_SimpleRun(t *testing.T) {
 	upd, tr := testTaskRunner(false)
 	tr.MarkReceived()
 	go tr.Run()
-	defer tr.Destroy(structs.TaskKilled)
+	defer tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
 	defer tr.ctx.AllocDir.Destroy()
 
 	select {
@@ -138,7 +138,7 @@ func TestTaskRunner_Destroy(t *testing.T) {
 	}
 
 	// Begin the tear down
-	tr.Destroy(structs.TaskKilled)
+	tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
 
 	select {
 	case <-tr.WaitCh():
@@ -171,7 +171,7 @@ func TestTaskRunner_Update(t *testing.T) {
 	tr.task.Config["command"] = "/bin/sleep"
 	tr.task.Config["args"] = []string{"100"}
 	go tr.Run()
-	defer tr.Destroy(structs.TaskKilled)
+	defer tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
 	defer tr.ctx.AllocDir.Destroy()
 
 	// Update the task definition
@@ -225,7 +225,7 @@ func TestTaskRunner_SaveRestoreState(t *testing.T) {
 	tr.task.Config["command"] = "/bin/sleep"
 	tr.task.Config["args"] = []string{"10"}
 	go tr.Run()
-	defer tr.Destroy(structs.TaskKilled)
+	defer tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
 
 	// Snapshot state
 	time.Sleep(2 * time.Second)
@@ -240,7 +240,7 @@ func TestTaskRunner_SaveRestoreState(t *testing.T) {
 		t.Fatalf("err: %v", err)
 	}
 	go tr2.Run()
-	defer tr2.Destroy(structs.TaskKilled)
+	defer tr2.Destroy(structs.NewTaskEvent(structs.TaskKilled))
 
 	// Destroy and wait
 	testutil.WaitForResult(func() (bool, error) {
@@ -272,7 +272,7 @@ func TestTaskRunner_Download_List(t *testing.T) {
 	upd, tr := testTaskRunnerFromAlloc(false, alloc)
 	tr.MarkReceived()
 	go tr.Run()
-	defer tr.Destroy(structs.TaskKilled)
+	defer tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
 	defer tr.ctx.AllocDir.Destroy()
 
 	select {
@@ -337,7 +337,7 @@ func TestTaskRunner_Download_Retries(t *testing.T) {
 	upd, tr := testTaskRunnerFromAlloc(true, alloc)
 	tr.MarkReceived()
 	go tr.Run()
-	defer tr.Destroy(structs.TaskKilled)
+	defer tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
 	defer tr.ctx.AllocDir.Destroy()
 
 	select {
diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go
index 470c02b93..9d843622e 100644
--- a/nomad/job_endpoint_test.go
+++ b/nomad/job_endpoint_test.go
@@ -898,7 +898,10 @@ func TestJobEndpoint_GetJob(t *testing.T) {
 }
 
 func TestJobEndpoint_GetJobSummary(t *testing.T) {
-	s1 := testServer(t, nil)
+	s1 := testServer(t, func(c *Config) {
+		c.NumSchedulers = 0 // Prevent automatic dequeue
+	})
+
 	defer s1.Shutdown()
 	codec := rpcClient(t, s1)
 	testutil.WaitForLeader(t, s1.RPC)