Merge pull request #1649 from hashicorp/test-fixes

fixed the exec fingerprinter test
Authored by Diptanu Choudhury on 2016-08-23 17:39:21 -05:00; committed by GitHub.
5 changed files with 22 additions and 10 deletions

@@ -20,6 +20,7 @@ import (
 var basicResources = &structs.Resources{
 	CPU:      250,
 	MemoryMB: 256,
+	DiskMB:   20,
 	Networks: []*structs.NetworkResource{
 		&structs.NetworkResource{
 			IP: "0.0.0.0",

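The added DiskMB keeps the shared basicResources fixture in step with disk-aware resource accounting, so tasks built from it now ask for 20 MB of disk alongside CPU, memory and a network. As a rough sketch (illustrative only; the task name, driver and config values are assumptions, not lines from this commit), a driver test consumes the fixture roughly like this:

// Illustrative only: a throwaway task that requests the shared
// basicResources fixture, including the newly added DiskMB.
task := &structs.Task{
	Name:   "sleep",
	Driver: "exec",
	Config: map[string]interface{}{
		"command": "/bin/sleep",
		"args":    []string{"1"},
	},
	Resources: basicResources,
}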
@@ -22,7 +22,11 @@ import (
 func TestExecDriver_Fingerprint(t *testing.T) {
 	ctestutils.ExecCompatible(t)
-	driverCtx, _ := testDriverContexts(&structs.Task{Name: "foo"})
+	task := &structs.Task{
+		Name:      "foo",
+		Resources: structs.DefaultResources(),
+	}
+	driverCtx, _ := testDriverContexts(task)
 	d := NewExecDriver(driverCtx)
 	node := &structs.Node{
 		Attributes: map[string]string{

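Giving the test task structs.DefaultResources() instead of leaving Resources nil is the fix the commit message refers to. For context, the rest of the test typically runs the fingerprinter against the node; the assertions below are a hedged sketch (the driver.exec attribute key and the empty config.Config literal are assumptions, not part of this diff):

// Illustrative sketch: run the fingerprinter and check that it applies
// and advertises the driver on the node.
apply, err := d.Fingerprint(&config.Config{}, node)
if err != nil {
	t.Fatalf("err: %v", err)
}
if !apply {
	t.Fatalf("expected the exec driver to apply on this node")
}
if node.Attributes["driver.exec"] == "" {
	t.Fatalf("missing driver.exec attribute")
}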
@@ -31,7 +31,11 @@ func javaLocated() bool {
 // The fingerprinter test should always pass, even if Java is not installed.
 func TestJavaDriver_Fingerprint(t *testing.T) {
 	ctestutils.JavaCompatible(t)
-	driverCtx, _ := testDriverContexts(&structs.Task{Name: "foo"})
+	task := &structs.Task{
+		Name:      "foo",
+		Resources: structs.DefaultResources(),
+	}
+	driverCtx, _ := testDriverContexts(task)
 	d := NewJavaDriver(driverCtx)
 	node := &structs.Node{
 		Attributes: map[string]string{

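The Java driver test gets the identical treatment, with the extra wrinkle spelled out in the comment above: it must pass whether or not Java is installed. A hedged sketch of how the assertion can be gated on javaLocated() (illustrative only; not lines from this change):

// Illustrative sketch: the fingerprint call itself should never error,
// and the driver.java attribute is only expected when a JVM was found.
if _, err := d.Fingerprint(&config.Config{}, node); err != nil {
	t.Fatalf("err: %v", err)
}
if javaLocated() && node.Attributes["driver.java"] == "" {
	t.Fatalf("java is installed but the driver.java attribute is missing")
}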
@@ -71,7 +71,7 @@ func TestTaskRunner_SimpleRun(t *testing.T) {
 	upd, tr := testTaskRunner(false)
 	tr.MarkReceived()
 	go tr.Run()
-	defer tr.Destroy(structs.TaskKilled)
+	defer tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
 	defer tr.ctx.AllocDir.Destroy()
 
 	select {
@@ -138,7 +138,7 @@ func TestTaskRunner_Destroy(t *testing.T) {
 	}
 
 	// Begin the tear down
-	tr.Destroy(structs.TaskKilled)
+	tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
 
 	select {
 	case <-tr.WaitCh():
@@ -171,7 +171,7 @@ func TestTaskRunner_Update(t *testing.T) {
 	tr.task.Config["command"] = "/bin/sleep"
 	tr.task.Config["args"] = []string{"100"}
 	go tr.Run()
-	defer tr.Destroy(structs.TaskKilled)
+	defer tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
 	defer tr.ctx.AllocDir.Destroy()
 
 	// Update the task definition
@@ -225,7 +225,7 @@ func TestTaskRunner_SaveRestoreState(t *testing.T) {
 	tr.task.Config["command"] = "/bin/sleep"
 	tr.task.Config["args"] = []string{"10"}
 	go tr.Run()
-	defer tr.Destroy(structs.TaskKilled)
+	defer tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
 
 	// Snapshot state
 	time.Sleep(2 * time.Second)
@@ -240,7 +240,7 @@ func TestTaskRunner_SaveRestoreState(t *testing.T) {
 		t.Fatalf("err: %v", err)
 	}
 	go tr2.Run()
-	defer tr2.Destroy(structs.TaskKilled)
+	defer tr2.Destroy(structs.NewTaskEvent(structs.TaskKilled))
 
 	// Destroy and wait
 	testutil.WaitForResult(func() (bool, error) {
@@ -272,7 +272,7 @@ func TestTaskRunner_Download_List(t *testing.T) {
 	upd, tr := testTaskRunnerFromAlloc(false, alloc)
 	tr.MarkReceived()
 	go tr.Run()
-	defer tr.Destroy(structs.TaskKilled)
+	defer tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
 	defer tr.ctx.AllocDir.Destroy()
 
 	select {
@@ -337,7 +337,7 @@ func TestTaskRunner_Download_Retries(t *testing.T) {
 	upd, tr := testTaskRunnerFromAlloc(true, alloc)
 	tr.MarkReceived()
 	go tr.Run()
-	defer tr.Destroy(structs.TaskKilled)
+	defer tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
 	defer tr.ctx.AllocDir.Destroy()
 
 	select {

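Every hunk in this file is the same mechanical change: Destroy now takes a *structs.TaskEvent rather than a bare event-type constant, so callers say why the runner is being torn down and that reason can land in the task's event history. A hedged sketch of the resulting tear-down pattern (the timeout value is illustrative, not taken from this diff):

// Illustrative only: destroy with an explicit kill event, then wait for
// the runner to exit instead of leaking its goroutine.
tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
select {
case <-tr.WaitCh():
case <-time.After(20 * time.Second):
	t.Fatalf("timeout waiting for the task runner to exit")
}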
@@ -898,7 +898,10 @@ func TestJobEndpoint_GetJob(t *testing.T) {
 }
 
 func TestJobEndpoint_GetJobSummary(t *testing.T) {
-	s1 := testServer(t, nil)
+	s1 := testServer(t, func(c *Config) {
+		c.NumSchedulers = 0 // Prevent automatic dequeue
+	})
 	defer s1.Shutdown()
 	codec := rpcClient(t, s1)
 	testutil.WaitForLeader(t, s1.RPC)
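Running the test server with NumSchedulers set to 0 stops it from dequeuing the evaluation created when a job is registered, so the summary the test reads back is not racing a scheduler. A hedged sketch of the registration that typically follows (mock.Job() and the request shapes mirror the neighbouring tests; these exact lines are not part of this diff):

// Illustrative only: register a job; with NumSchedulers = 0 its evaluation
// stays queued instead of being scheduled behind the test's back.
job := mock.Job()
req := &structs.JobRegisterRequest{
	Job:          job,
	WriteRequest: structs.WriteRequest{Region: "global"},
}
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
	t.Fatalf("err: %v", err)
}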