Merge pull request #2892 from hashicorp/f-parallel-nomad

Parallel Nomad pkg tests and more reliable test Vault
Alex Dadgar (committed by GitHub)
2017-07-25 17:43:00 -07:00
31 changed files with 511 additions and 72 deletions
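
Most of the 511 added lines are mechanical: each test in the nomad package now calls t.Parallel() as its first statement, which tells the Go test runner to park the test and resume it concurrently with the package's other parallel tests (bounded by -parallel, which defaults to GOMAXPROCS). A minimal sketch of the pattern; the test body is illustrative, not taken from this diff:

```go
package nomad

import "testing"

func TestExample(t *testing.T) {
	// Must come first: the runner parks this test and later resumes it
	// concurrently with every other parallel test in the package.
	t.Parallel()

	// Each parallel test must own its fixtures (server, state store, ports);
	// shared mutable state across tests would race.
	if got := 1 + 1; got != 2 {
		t.Fatalf("got %d, want 2", got)
	}
}
```

The flip side is that parallel tests must not collide on fixtures, which is why the same commit also randomizes ports in testServer and reworks the test Vault helper (both near the end of the diff).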

View File

@@ -136,7 +136,7 @@ func newTestHarness(t *testing.T, templates []*structs.Template, consul, vault b
 	}
 	if vault {
-		harness.vault = testutil.NewTestVault(t).Start()
+		harness.vault = testutil.NewTestVault(t)
 		harness.config.VaultConfig = harness.vault.Config
 		harness.vaultToken = harness.vault.RootToken
 	}
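
Every NewTestVault call site in this commit drops the chained .Start() (here and in the fingerprint and vault-client tests below) while defer tv.Stop() stays, implying the helper now brings the dev Vault up itself, retrying as needed; presumably the "more reliable test Vault" half of the PR title. A hypothetical sketch of that constructor shape — the retry count, backoff, and tryStartVault helper are all illustrative, not the real testutil code:

```go
package testutil

import (
	"fmt"
	"math/rand"
	"testing"
	"time"
)

// TestVault is a stand-in; per the call sites in this diff, the real
// testutil.TestVault also exposes a Config field (omitted here).
type TestVault struct {
	RootToken string
}

func (v *TestVault) Stop() {}

// NewTestVault sketches a constructor that owns startup and retries,
// so call sites no longer chain .Start() themselves.
func NewTestVault(t *testing.T) *TestVault {
	for i := 10; i >= 0; i-- {
		v, err := tryStartVault()
		if err == nil {
			return v
		}
		if i == 0 {
			t.Fatalf("test vault failed to start: %v", err)
		}
		// brief random backoff before retrying on a fresh port
		time.Sleep(time.Duration(rand.Int31n(500)) * time.Millisecond)
	}
	return nil
}

// tryStartVault is a placeholder for launching a dev Vault on a random port.
func tryStartVault() (*TestVault, error) {
	if rand.Intn(4) == 0 { // simulate an occasional port collision
		return nil, fmt.Errorf("port already in use")
	}
	return &TestVault{RootToken: "root"}, nil
}
```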

View File

@@ -9,7 +9,7 @@ import (
 )
 func TestVaultFingerprint(t *testing.T) {
-	tv := testutil.NewTestVault(t).Start()
+	tv := testutil.NewTestVault(t)
 	defer tv.Stop()
 	fp := NewVaultFingerprint(testLogger())

View File

@@ -13,7 +13,7 @@ import (
 func TestVaultClient_TokenRenewals(t *testing.T) {
 	t.Parallel()
-	v := testutil.NewTestVault(t).Start()
+	v := testutil.NewTestVault(t)
 	defer v.Stop()
 	logger := log.New(os.Stderr, "TEST: ", log.Lshortfile|log.LstdFlags)

View File

@@ -12,6 +12,7 @@ import (
)
func TestAllocEndpoint_List(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -70,6 +71,7 @@ func TestAllocEndpoint_List(t *testing.T) {
}
func TestAllocEndpoint_List_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -143,6 +145,7 @@ func TestAllocEndpoint_List_Blocking(t *testing.T) {
}
func TestAllocEndpoint_GetAlloc(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -176,6 +179,7 @@ func TestAllocEndpoint_GetAlloc(t *testing.T) {
}
func TestAllocEndpoint_GetAlloc_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -230,6 +234,7 @@ func TestAllocEndpoint_GetAlloc_Blocking(t *testing.T) {
}
func TestAllocEndpoint_GetAllocs(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -276,6 +281,7 @@ func TestAllocEndpoint_GetAllocs(t *testing.T) {
}
func TestAllocEndpoint_GetAllocs_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()

View File

@@ -20,6 +20,7 @@ func testBlockedEvals(t *testing.T) (*BlockedEvals, *EvalBroker) {
}
func TestBlockedEvals_Block_Disabled(t *testing.T) {
t.Parallel()
blocked, _ := testBlockedEvals(t)
blocked.SetEnabled(false)
@@ -37,6 +38,7 @@ func TestBlockedEvals_Block_Disabled(t *testing.T) {
}
func TestBlockedEvals_Block_SameJob(t *testing.T) {
t.Parallel()
blocked, _ := testBlockedEvals(t)
// Create two blocked evals and add them to the blocked tracker.
@@ -54,6 +56,7 @@ func TestBlockedEvals_Block_SameJob(t *testing.T) {
}
func TestBlockedEvals_Block_PriorUnblocks(t *testing.T) {
t.Parallel()
blocked, _ := testBlockedEvals(t)
// Do unblocks prior to blocking
@@ -75,6 +78,7 @@ func TestBlockedEvals_Block_PriorUnblocks(t *testing.T) {
}
func TestBlockedEvals_GetDuplicates(t *testing.T) {
t.Parallel()
blocked, _ := testBlockedEvals(t)
// Create duplicate blocked evals and add them to the blocked tracker.
@@ -112,6 +116,7 @@ func TestBlockedEvals_GetDuplicates(t *testing.T) {
}
func TestBlockedEvals_UnblockEscaped(t *testing.T) {
t.Parallel()
blocked, broker := testBlockedEvals(t)
// Create an escaped eval and add it to the blocked tracker.
@@ -147,6 +152,7 @@ func TestBlockedEvals_UnblockEscaped(t *testing.T) {
}
func TestBlockedEvals_UnblockEligible(t *testing.T) {
t.Parallel()
blocked, broker := testBlockedEvals(t)
// Create a blocked eval that is eligible on a specific node class and add
@@ -183,6 +189,7 @@ func TestBlockedEvals_UnblockEligible(t *testing.T) {
}
func TestBlockedEvals_UnblockIneligible(t *testing.T) {
t.Parallel()
blocked, broker := testBlockedEvals(t)
// Create a blocked eval that is ineligible on a specific node class and add
@@ -219,6 +226,7 @@ func TestBlockedEvals_UnblockIneligible(t *testing.T) {
}
func TestBlockedEvals_UnblockUnknown(t *testing.T) {
t.Parallel()
blocked, broker := testBlockedEvals(t)
// Create a blocked eval that is ineligible on a specific node class and add
@@ -256,6 +264,7 @@ func TestBlockedEvals_UnblockUnknown(t *testing.T) {
}
func TestBlockedEvals_Reblock(t *testing.T) {
t.Parallel()
blocked, broker := testBlockedEvals(t)
// Create an evaluation, Enqueue/Dequeue it to get a token
@@ -314,6 +323,7 @@ func TestBlockedEvals_Reblock(t *testing.T) {
// Test the block case in which the eval should be immediately unblocked since
// it is escaped and old
func TestBlockedEvals_Block_ImmediateUnblock_Escaped(t *testing.T) {
t.Parallel()
blocked, broker := testBlockedEvals(t)
// Do an unblock prior to blocking
@@ -350,6 +360,7 @@ func TestBlockedEvals_Block_ImmediateUnblock_Escaped(t *testing.T) {
// there is an unblock on an unseen class that occurred while it was in the
// scheduler
func TestBlockedEvals_Block_ImmediateUnblock_UnseenClass_After(t *testing.T) {
t.Parallel()
blocked, broker := testBlockedEvals(t)
// Do an unblock prior to blocking
@@ -386,6 +397,7 @@ func TestBlockedEvals_Block_ImmediateUnblock_UnseenClass_After(t *testing.T) {
// there is an unblock on an unseen class that occurred before it was in the
// scheduler
func TestBlockedEvals_Block_ImmediateUnblock_UnseenClass_Before(t *testing.T) {
t.Parallel()
blocked, _ := testBlockedEvals(t)
// Do an unblock prior to blocking
@@ -409,6 +421,7 @@ func TestBlockedEvals_Block_ImmediateUnblock_UnseenClass_Before(t *testing.T) {
// Test the block case in which the eval should be immediately unblocked since
// a class it is eligible for has been unblocked
func TestBlockedEvals_Block_ImmediateUnblock_SeenClass(t *testing.T) {
t.Parallel()
blocked, broker := testBlockedEvals(t)
// Do an unblock prior to blocking
@@ -442,6 +455,7 @@ func TestBlockedEvals_Block_ImmediateUnblock_SeenClass(t *testing.T) {
}
func TestBlockedEvals_UnblockFailed(t *testing.T) {
t.Parallel()
blocked, broker := testBlockedEvals(t)
// Create blocked evals that are due to failures
@@ -486,6 +500,7 @@ func TestBlockedEvals_UnblockFailed(t *testing.T) {
}
func TestBlockedEvals_Untrack(t *testing.T) {
t.Parallel()
blocked, _ := testBlockedEvals(t)
// Create two blocked evals and add them to the blocked tracker.

View File

@@ -12,6 +12,7 @@ import (
)
func TestCoreScheduler_EvalGC(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -93,6 +94,7 @@ func TestCoreScheduler_EvalGC(t *testing.T) {
// An EvalGC should never reap a batch job that has not been stopped
func TestCoreScheduler_EvalGC_Batch(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -193,6 +195,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) {
// An EvalGC should reap a batch job that has been stopped
func TestCoreScheduler_EvalGC_BatchStopped(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -280,6 +283,7 @@ func TestCoreScheduler_EvalGC_BatchStopped(t *testing.T) {
}
func TestCoreScheduler_EvalGC_Partial(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -379,6 +383,7 @@ func TestCoreScheduler_EvalGC_Partial(t *testing.T) {
}
func TestCoreScheduler_EvalGC_Force(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -440,6 +445,7 @@ func TestCoreScheduler_EvalGC_Force(t *testing.T) {
}
func TestCoreScheduler_NodeGC(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -486,6 +492,7 @@ func TestCoreScheduler_NodeGC(t *testing.T) {
}
func TestCoreScheduler_NodeGC_TerminalAllocs(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -540,6 +547,7 @@ func TestCoreScheduler_NodeGC_TerminalAllocs(t *testing.T) {
}
func TestCoreScheduler_NodeGC_RunningAllocs(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -596,6 +604,7 @@ func TestCoreScheduler_NodeGC_RunningAllocs(t *testing.T) {
}
func TestCoreScheduler_NodeGC_Force(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -638,6 +647,7 @@ func TestCoreScheduler_NodeGC_Force(t *testing.T) {
}
func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -760,6 +770,7 @@ func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) {
}
func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -898,6 +909,7 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) {
// This test ensures that batch jobs are GC'd in one shot, meaning it GCs all
// allocs/evals and the job, or nothing
func TestCoreScheduler_JobGC_OneShot(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -1009,6 +1021,7 @@ func TestCoreScheduler_JobGC_OneShot(t *testing.T) {
// This test ensures that stopped jobs are GCd
func TestCoreScheduler_JobGC_Stopped(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -1105,6 +1118,7 @@ func TestCoreScheduler_JobGC_Stopped(t *testing.T) {
}
func TestCoreScheduler_JobGC_Force(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -1166,6 +1180,7 @@ func TestCoreScheduler_JobGC_Force(t *testing.T) {
// This test ensures parameterized jobs only get gc'd when stopped
func TestCoreScheduler_JobGC_Parameterized(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -1244,6 +1259,7 @@ func TestCoreScheduler_JobGC_Parameterized(t *testing.T) {
// This test ensures periodic jobs don't get GCd til they are stopped
func TestCoreScheduler_JobGC_Periodic(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
@@ -1317,6 +1333,7 @@ func TestCoreScheduler_JobGC_Periodic(t *testing.T) {
}
func TestCoreScheduler_DeploymentGC(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -1366,6 +1383,7 @@ func TestCoreScheduler_DeploymentGC(t *testing.T) {
}
func TestCoreScheduler_DeploymentGC_Force(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -1401,6 +1419,7 @@ func TestCoreScheduler_DeploymentGC_Force(t *testing.T) {
}
func TestCoreScheduler_PartitionEvalReap(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -1442,6 +1461,7 @@ func TestCoreScheduler_PartitionEvalReap(t *testing.T) {
}
func TestCoreScheduler_PartitionDeploymentReap(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)

View File

@@ -14,6 +14,7 @@ import (
)
func TestDeploymentEndpoint_GetDeployment(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -41,6 +42,7 @@ func TestDeploymentEndpoint_GetDeployment(t *testing.T) {
}
func TestDeploymentEndpoint_GetDeployment_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -88,6 +90,7 @@ func TestDeploymentEndpoint_GetDeployment_Blocking(t *testing.T) {
}
func TestDeploymentEndpoint_Fail(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -136,6 +139,7 @@ func TestDeploymentEndpoint_Fail(t *testing.T) {
}
func TestDeploymentEndpoint_Fail_Rollback(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -210,6 +214,7 @@ func TestDeploymentEndpoint_Fail_Rollback(t *testing.T) {
}
func TestDeploymentEndpoint_Pause(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -251,6 +256,7 @@ func TestDeploymentEndpoint_Pause(t *testing.T) {
}
func TestDeploymentEndpoint_Promote(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -314,6 +320,7 @@ func TestDeploymentEndpoint_Promote(t *testing.T) {
}
func TestDeploymentEndpoint_SetAllocHealth(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -380,6 +387,7 @@ func TestDeploymentEndpoint_SetAllocHealth(t *testing.T) {
}
func TestDeploymentEndpoint_SetAllocHealth_Rollback(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -466,6 +474,7 @@ func TestDeploymentEndpoint_SetAllocHealth_Rollback(t *testing.T) {
}
func TestDeploymentEndpoint_List(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -504,6 +513,7 @@ func TestDeploymentEndpoint_List(t *testing.T) {
}
func TestDeploymentEndpoint_List_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -559,6 +569,7 @@ func TestDeploymentEndpoint_List_Blocking(t *testing.T) {
}
func TestDeploymentEndpoint_Allocations(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -592,6 +603,7 @@ func TestDeploymentEndpoint_Allocations(t *testing.T) {
}
func TestDeploymentEndpoint_Allocations_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -657,6 +669,7 @@ func TestDeploymentEndpoint_Allocations_Blocking(t *testing.T) {
}
func TestDeploymentEndpoint_Reap(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)

View File

@@ -49,6 +49,7 @@ func testBrokerFromConfig(t *testing.T, c *Config) *EvalBroker {
}
func TestEvalBroker_Enqueue_Dequeue_Nack_Ack(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
// Enqueue, but broker is disabled!
@@ -224,6 +225,7 @@ func TestEvalBroker_Enqueue_Dequeue_Nack_Ack(t *testing.T) {
}
func TestEvalBroker_Nack_Delay(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
// Enqueue, but broker is disabled!
@@ -381,6 +383,7 @@ func TestEvalBroker_Nack_Delay(t *testing.T) {
}
func TestEvalBroker_Serialize_DuplicateJobID(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
b.SetEnabled(true)
@@ -524,6 +527,7 @@ func TestEvalBroker_Serialize_DuplicateJobID(t *testing.T) {
}
func TestEvalBroker_Enqueue_Disable(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
// Enqueue
@@ -548,6 +552,7 @@ func TestEvalBroker_Enqueue_Disable(t *testing.T) {
}
func TestEvalBroker_Dequeue_Timeout(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
b.SetEnabled(true)
@@ -568,6 +573,7 @@ func TestEvalBroker_Dequeue_Timeout(t *testing.T) {
}
func TestEvalBroker_Dequeue_Empty_Timeout(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
b.SetEnabled(true)
doneCh := make(chan struct{}, 1)
@@ -604,6 +610,7 @@ func TestEvalBroker_Dequeue_Empty_Timeout(t *testing.T) {
// Ensure higher priority dequeued first
func TestEvalBroker_Dequeue_Priority(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
b.SetEnabled(true)
@@ -637,6 +644,7 @@ func TestEvalBroker_Dequeue_Priority(t *testing.T) {
// Ensure FIFO at fixed priority
func TestEvalBroker_Dequeue_FIFO(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
b.SetEnabled(true)
NUM := 100
@@ -658,6 +666,7 @@ func TestEvalBroker_Dequeue_FIFO(t *testing.T) {
// Ensure fairness between schedulers
func TestEvalBroker_Dequeue_Fairness(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
b.SetEnabled(true)
NUM := 1000
@@ -699,6 +708,7 @@ func TestEvalBroker_Dequeue_Fairness(t *testing.T) {
// Ensure we get unblocked
func TestEvalBroker_Dequeue_Blocked(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
b.SetEnabled(true)
@@ -737,6 +747,7 @@ func TestEvalBroker_Dequeue_Blocked(t *testing.T) {
// Ensure we nack in a timely manner
func TestEvalBroker_Nack_Timeout(t *testing.T) {
t.Parallel()
b := testBroker(t, 5*time.Millisecond)
b.SetEnabled(true)
@@ -772,6 +783,7 @@ func TestEvalBroker_Nack_Timeout(t *testing.T) {
// Ensure we nack in a timely manner
func TestEvalBroker_Nack_TimeoutReset(t *testing.T) {
t.Parallel()
b := testBroker(t, 50*time.Millisecond)
b.SetEnabled(true)
@@ -812,6 +824,7 @@ func TestEvalBroker_Nack_TimeoutReset(t *testing.T) {
}
func TestEvalBroker_PauseResumeNackTimeout(t *testing.T) {
t.Parallel()
b := testBroker(t, 50*time.Millisecond)
b.SetEnabled(true)
@@ -859,6 +872,7 @@ func TestEvalBroker_PauseResumeNackTimeout(t *testing.T) {
}
func TestEvalBroker_DeliveryLimit(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
b.SetEnabled(true)
@@ -948,6 +962,7 @@ func TestEvalBroker_DeliveryLimit(t *testing.T) {
}
func TestEvalBroker_AckAtDeliveryLimit(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
b.SetEnabled(true)
@@ -990,6 +1005,7 @@ func TestEvalBroker_AckAtDeliveryLimit(t *testing.T) {
// Ensure fairness between schedulers
func TestEvalBroker_Wait(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
b.SetEnabled(true)
@@ -1031,6 +1047,7 @@ func TestEvalBroker_Wait(t *testing.T) {
// Ensure that priority is taken into account when enqueueing many evaluations.
func TestEvalBroker_EnqueueAll_Dequeue_Fair(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
b.SetEnabled(true)
@@ -1075,6 +1092,7 @@ func TestEvalBroker_EnqueueAll_Dequeue_Fair(t *testing.T) {
}
func TestEvalBroker_EnqueueAll_Requeue_Ack(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
b.SetEnabled(true)
@@ -1131,6 +1149,7 @@ func TestEvalBroker_EnqueueAll_Requeue_Ack(t *testing.T) {
}
func TestEvalBroker_EnqueueAll_Requeue_Nack(t *testing.T) {
t.Parallel()
b := testBroker(t, 0)
b.SetEnabled(true)

View File

@@ -16,6 +16,7 @@ import (
)
func TestEvalEndpoint_GetEval(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -56,6 +57,7 @@ func TestEvalEndpoint_GetEval(t *testing.T) {
}
func TestEvalEndpoint_GetEval_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -133,6 +135,7 @@ func TestEvalEndpoint_GetEval_Blocking(t *testing.T) {
}
func TestEvalEndpoint_Dequeue(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -170,6 +173,7 @@ func TestEvalEndpoint_Dequeue(t *testing.T) {
}
func TestEvalEndpoint_Dequeue_Version_Mismatch(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -195,6 +199,7 @@ func TestEvalEndpoint_Dequeue_Version_Mismatch(t *testing.T) {
}
func TestEvalEndpoint_Ack(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -234,6 +239,7 @@ func TestEvalEndpoint_Ack(t *testing.T) {
}
func TestEvalEndpoint_Nack(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
// Disable all of the schedulers so we can manually dequeue
// evals and check the queue status
@@ -286,6 +292,7 @@ func TestEvalEndpoint_Nack(t *testing.T) {
}
func TestEvalEndpoint_Update(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -333,6 +340,7 @@ func TestEvalEndpoint_Update(t *testing.T) {
}
func TestEvalEndpoint_Create(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -384,6 +392,7 @@ func TestEvalEndpoint_Create(t *testing.T) {
}
func TestEvalEndpoint_Reap(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -418,6 +427,7 @@ func TestEvalEndpoint_Reap(t *testing.T) {
}
func TestEvalEndpoint_List(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -465,6 +475,7 @@ func TestEvalEndpoint_List(t *testing.T) {
}
func TestEvalEndpoint_List_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -529,6 +540,7 @@ func TestEvalEndpoint_List_Blocking(t *testing.T) {
}
func TestEvalEndpoint_Allocations(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -566,6 +578,7 @@ func TestEvalEndpoint_Allocations(t *testing.T) {
}
func TestEvalEndpoint_Allocations_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -620,6 +633,7 @@ func TestEvalEndpoint_Allocations_Blocking(t *testing.T) {
}
func TestEvalEndpoint_Reblock_NonExistent(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -655,6 +669,7 @@ func TestEvalEndpoint_Reblock_NonExistent(t *testing.T) {
}
func TestEvalEndpoint_Reblock_NonBlocked(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -696,6 +711,7 @@ func TestEvalEndpoint_Reblock_NonBlocked(t *testing.T) {
}
func TestEvalEndpoint_Reblock(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})

View File

@@ -71,6 +71,7 @@ func makeLog(buf []byte) *raft.Log {
}
func TestFSM_UpsertNode(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
fsm.blockedEvals.SetEnabled(true)
@@ -127,6 +128,7 @@ func TestFSM_UpsertNode(t *testing.T) {
}
func TestFSM_DeregisterNode(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
node := mock.Node()
@@ -168,6 +170,7 @@ func TestFSM_DeregisterNode(t *testing.T) {
}
func TestFSM_UpdateNodeStatus(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
fsm.blockedEvals.SetEnabled(true)
@@ -227,6 +230,7 @@ func TestFSM_UpdateNodeStatus(t *testing.T) {
}
func TestFSM_UpdateNodeDrain(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
node := mock.Node()
@@ -269,6 +273,7 @@ func TestFSM_UpdateNodeDrain(t *testing.T) {
}
func TestFSM_RegisterJob(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
job := mock.PeriodicJob()
@@ -317,6 +322,7 @@ func TestFSM_RegisterJob(t *testing.T) {
}
func TestFSM_DeregisterJob_Purge(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
job := mock.PeriodicJob()
@@ -373,6 +379,7 @@ func TestFSM_DeregisterJob_Purge(t *testing.T) {
}
func TestFSM_DeregisterJob_NoPurge(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
job := mock.PeriodicJob()
@@ -432,6 +439,7 @@ func TestFSM_DeregisterJob_NoPurge(t *testing.T) {
}
func TestFSM_UpdateEval(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
fsm.evalBroker.SetEnabled(true)
@@ -469,6 +477,7 @@ func TestFSM_UpdateEval(t *testing.T) {
}
func TestFSM_UpdateEval_Blocked(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
fsm.evalBroker.SetEnabled(true)
fsm.blockedEvals.SetEnabled(true)
@@ -517,6 +526,7 @@ func TestFSM_UpdateEval_Blocked(t *testing.T) {
}
func TestFSM_UpdateEval_Untrack(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
fsm.evalBroker.SetEnabled(true)
fsm.blockedEvals.SetEnabled(true)
@@ -571,6 +581,7 @@ func TestFSM_UpdateEval_Untrack(t *testing.T) {
}
func TestFSM_UpdateEval_NoUntrack(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
fsm.evalBroker.SetEnabled(true)
fsm.blockedEvals.SetEnabled(true)
@@ -627,6 +638,7 @@ func TestFSM_UpdateEval_NoUntrack(t *testing.T) {
}
func TestFSM_DeleteEval(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
eval := mock.Eval()
@@ -668,6 +680,7 @@ func TestFSM_DeleteEval(t *testing.T) {
}
func TestFSM_UpsertAllocs(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
alloc := mock.Alloc()
@@ -725,6 +738,7 @@ func TestFSM_UpsertAllocs(t *testing.T) {
}
func TestFSM_UpsertAllocs_SharedJob(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
alloc := mock.Alloc()
@@ -797,6 +811,7 @@ func TestFSM_UpsertAllocs_SharedJob(t *testing.T) {
}
func TestFSM_UpsertAllocs_StrippedResources(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
alloc := mock.Alloc()
@@ -837,6 +852,7 @@ func TestFSM_UpsertAllocs_StrippedResources(t *testing.T) {
}
func TestFSM_UpdateAllocFromClient_Unblock(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
fsm.blockedEvals.SetEnabled(true)
state := fsm.State()
@@ -921,6 +937,7 @@ func TestFSM_UpdateAllocFromClient_Unblock(t *testing.T) {
}
func TestFSM_UpdateAllocFromClient(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
state := fsm.State()
@@ -959,6 +976,7 @@ func TestFSM_UpdateAllocFromClient(t *testing.T) {
}
func TestFSM_UpsertVaultAccessor(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
fsm.blockedEvals.SetEnabled(true)
@@ -1008,6 +1026,7 @@ func TestFSM_UpsertVaultAccessor(t *testing.T) {
}
func TestFSM_DeregisterVaultAccessor(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
fsm.blockedEvals.SetEnabled(true)
@@ -1050,6 +1069,7 @@ func TestFSM_DeregisterVaultAccessor(t *testing.T) {
}
func TestFSM_ApplyPlanResults(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
// Create the request and create a deployment
@@ -1143,6 +1163,7 @@ func TestFSM_ApplyPlanResults(t *testing.T) {
}
func TestFSM_DeploymentStatusUpdate(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
fsm.evalBroker.SetEnabled(true)
state := fsm.State()
@@ -1211,6 +1232,7 @@ func TestFSM_DeploymentStatusUpdate(t *testing.T) {
}
func TestFSM_JobStabilityUpdate(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
fsm.evalBroker.SetEnabled(true)
state := fsm.State()
@@ -1248,6 +1270,7 @@ func TestFSM_JobStabilityUpdate(t *testing.T) {
}
func TestFSM_DeploymentPromotion(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
fsm.evalBroker.SetEnabled(true)
state := fsm.State()
@@ -1352,6 +1375,7 @@ func TestFSM_DeploymentPromotion(t *testing.T) {
}
func TestFSM_DeploymentAllocHealth(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
fsm.evalBroker.SetEnabled(true)
state := fsm.State()
@@ -1458,6 +1482,7 @@ func TestFSM_DeploymentAllocHealth(t *testing.T) {
}
func TestFSM_DeleteDeployment(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
state := fsm.State()
@@ -1531,6 +1556,7 @@ func testSnapshotRestore(t *testing.T, fsm *nomadFSM) *nomadFSM {
}
func TestFSM_SnapshotRestore_Nodes(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()
@@ -1554,6 +1580,7 @@ func TestFSM_SnapshotRestore_Nodes(t *testing.T) {
}
func TestFSM_SnapshotRestore_Jobs(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()
@@ -1577,6 +1604,7 @@ func TestFSM_SnapshotRestore_Jobs(t *testing.T) {
}
func TestFSM_SnapshotRestore_Evals(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()
@@ -1600,6 +1628,7 @@ func TestFSM_SnapshotRestore_Evals(t *testing.T) {
}
func TestFSM_SnapshotRestore_Allocs(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()
@@ -1625,6 +1654,7 @@ func TestFSM_SnapshotRestore_Allocs(t *testing.T) {
}
func TestFSM_SnapshotRestore_Allocs_NoSharedResources(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()
@@ -1655,6 +1685,7 @@ func TestFSM_SnapshotRestore_Allocs_NoSharedResources(t *testing.T) {
}
func TestFSM_SnapshotRestore_Indexes(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()
@@ -1675,6 +1706,7 @@ func TestFSM_SnapshotRestore_Indexes(t *testing.T) {
}
func TestFSM_SnapshotRestore_TimeTable(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
@@ -1696,6 +1728,7 @@ func TestFSM_SnapshotRestore_TimeTable(t *testing.T) {
}
func TestFSM_SnapshotRestore_PeriodicLaunches(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()
@@ -1721,6 +1754,7 @@ func TestFSM_SnapshotRestore_PeriodicLaunches(t *testing.T) {
}
func TestFSM_SnapshotRestore_JobSummary(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()
@@ -1748,6 +1782,7 @@ func TestFSM_SnapshotRestore_JobSummary(t *testing.T) {
}
func TestFSM_SnapshotRestore_VaultAccessors(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()
@@ -1770,6 +1805,7 @@ func TestFSM_SnapshotRestore_VaultAccessors(t *testing.T) {
}
func TestFSM_SnapshotRestore_JobVersions(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()
@@ -1797,6 +1833,7 @@ func TestFSM_SnapshotRestore_JobVersions(t *testing.T) {
}
func TestFSM_SnapshotRestore_Deployments(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()
@@ -1820,6 +1857,7 @@ func TestFSM_SnapshotRestore_Deployments(t *testing.T) {
}
func TestFSM_SnapshotRestore_AddMissingSummary(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()
@@ -1859,6 +1897,7 @@ func TestFSM_SnapshotRestore_AddMissingSummary(t *testing.T) {
}
func TestFSM_ReconcileSummaries(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()

View File

@@ -13,6 +13,7 @@ import (
)
func TestInitializeHeartbeatTimers(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -38,6 +39,7 @@ func TestInitializeHeartbeatTimers(t *testing.T) {
}
func TestResetHeartbeatTimer(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -59,6 +61,7 @@ func TestResetHeartbeatTimer(t *testing.T) {
}
func TestResetHeartbeatTimerLocked(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -79,6 +82,7 @@ func TestResetHeartbeatTimerLocked(t *testing.T) {
}
func TestResetHeartbeatTimerLocked_Renew(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -117,6 +121,7 @@ func TestResetHeartbeatTimerLocked_Renew(t *testing.T) {
}
func TestInvalidateHeartbeat(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -144,6 +149,7 @@ func TestInvalidateHeartbeat(t *testing.T) {
}
func TestClearHeartbeatTimer(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -163,6 +169,7 @@ func TestClearHeartbeatTimer(t *testing.T) {
}
func TestClearAllHeartbeatTimers(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -184,6 +191,7 @@ func TestClearAllHeartbeatTimers(t *testing.T) {
}
func TestServer_HeartbeatTTL_Failover(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()

View File

@@ -18,6 +18,7 @@ import (
)
func TestJobEndpoint_Register(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -93,6 +94,7 @@ func TestJobEndpoint_Register(t *testing.T) {
}
func TestJobEndpoint_Register_InvalidDriverConfig(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -122,6 +124,7 @@ func TestJobEndpoint_Register_InvalidDriverConfig(t *testing.T) {
}
func TestJobEndpoint_Register_Payload(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -151,6 +154,7 @@ func TestJobEndpoint_Register_Payload(t *testing.T) {
}
func TestJobEndpoint_Register_Existing(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -263,6 +267,7 @@ func TestJobEndpoint_Register_Existing(t *testing.T) {
}
func TestJobEndpoint_Register_Periodic(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -311,6 +316,7 @@ func TestJobEndpoint_Register_Periodic(t *testing.T) {
}
func TestJobEndpoint_Register_ParameterizedJob(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -355,6 +361,7 @@ func TestJobEndpoint_Register_ParameterizedJob(t *testing.T) {
}
func TestJobEndpoint_Register_EnforceIndex(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -468,6 +475,7 @@ func TestJobEndpoint_Register_EnforceIndex(t *testing.T) {
}
func TestJobEndpoint_Register_Vault_Disabled(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
f := false
@@ -497,6 +505,7 @@ func TestJobEndpoint_Register_Vault_Disabled(t *testing.T) {
}
func TestJobEndpoint_Register_Vault_AllowUnauthenticated(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -546,6 +555,7 @@ func TestJobEndpoint_Register_Vault_AllowUnauthenticated(t *testing.T) {
}
func TestJobEndpoint_Register_Vault_NoToken(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -582,6 +592,7 @@ func TestJobEndpoint_Register_Vault_NoToken(t *testing.T) {
}
func TestJobEndpoint_Register_Vault_Policies(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -716,6 +727,7 @@ func TestJobEndpoint_Register_Vault_Policies(t *testing.T) {
}
func TestJobEndpoint_Revert(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -866,6 +878,7 @@ func TestJobEndpoint_Revert(t *testing.T) {
}
func TestJobEndpoint_Stable(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -922,6 +935,7 @@ func TestJobEndpoint_Stable(t *testing.T) {
}
func TestJobEndpoint_Evaluate(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -994,6 +1008,7 @@ func TestJobEndpoint_Evaluate(t *testing.T) {
}
func TestJobEndpoint_Evaluate_Periodic(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -1030,6 +1045,7 @@ func TestJobEndpoint_Evaluate_Periodic(t *testing.T) {
}
func TestJobEndpoint_Evaluate_ParameterizedJob(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -1068,6 +1084,7 @@ func TestJobEndpoint_Evaluate_ParameterizedJob(t *testing.T) {
}
func TestJobEndpoint_Deregister(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -1203,6 +1220,7 @@ func TestJobEndpoint_Deregister(t *testing.T) {
}
func TestJobEndpoint_Deregister_NonExistent(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -1259,6 +1277,7 @@ func TestJobEndpoint_Deregister_NonExistent(t *testing.T) {
}
func TestJobEndpoint_Deregister_Periodic(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -1310,6 +1329,7 @@ func TestJobEndpoint_Deregister_Periodic(t *testing.T) {
}
func TestJobEndpoint_Deregister_ParameterizedJob(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -1363,6 +1383,7 @@ func TestJobEndpoint_Deregister_ParameterizedJob(t *testing.T) {
}
func TestJobEndpoint_GetJob(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -1433,6 +1454,7 @@ func TestJobEndpoint_GetJob(t *testing.T) {
}
func TestJobEndpoint_GetJob_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -1507,6 +1529,7 @@ func TestJobEndpoint_GetJob_Blocking(t *testing.T) {
}
func TestJobEndpoint_GetJobVersions(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -1572,6 +1595,7 @@ func TestJobEndpoint_GetJobVersions(t *testing.T) {
}
func TestJobEndpoint_GetJobVersions_Diff(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -1661,6 +1685,7 @@ func TestJobEndpoint_GetJobVersions_Diff(t *testing.T) {
}
func TestJobEndpoint_GetJobVersions_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -1743,6 +1768,7 @@ func TestJobEndpoint_GetJobVersions_Blocking(t *testing.T) {
}
func TestJobEndpoint_GetJobSummary(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -1796,6 +1822,7 @@ func TestJobEndpoint_GetJobSummary(t *testing.T) {
}
func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -1887,6 +1914,7 @@ func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) {
}
func TestJobEndpoint_ListJobs(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -1940,6 +1968,7 @@ func TestJobEndpoint_ListJobs(t *testing.T) {
}
func TestJobEndpoint_ListJobs_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -2004,6 +2033,7 @@ func TestJobEndpoint_ListJobs_Blocking(t *testing.T) {
}
func TestJobEndpoint_Allocations(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -2041,6 +2071,7 @@ func TestJobEndpoint_Allocations(t *testing.T) {
}
func TestJobEndpoint_Allocations_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -2096,6 +2127,7 @@ func TestJobEndpoint_Allocations_Blocking(t *testing.T) {
}
func TestJobEndpoint_Evaluations(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -2131,6 +2163,7 @@ func TestJobEndpoint_Evaluations(t *testing.T) {
}
func TestJobEndpoint_Evaluations_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -2184,6 +2217,7 @@ func TestJobEndpoint_Evaluations_Blocking(t *testing.T) {
}
func TestJobEndpoint_Deployments(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -2213,6 +2247,7 @@ func TestJobEndpoint_Deployments(t *testing.T) {
}
func TestJobEndpoint_Deployments_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -2257,6 +2292,7 @@ func TestJobEndpoint_Deployments_Blocking(t *testing.T) {
}
func TestJobEndpoint_LatestDeployment(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -2289,6 +2325,7 @@ func TestJobEndpoint_LatestDeployment(t *testing.T) {
}
func TestJobEndpoint_LatestDeployment_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -2333,6 +2370,7 @@ func TestJobEndpoint_LatestDeployment_Blocking(t *testing.T) {
}
func TestJobEndpoint_Plan_WithDiff(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -2385,6 +2423,7 @@ func TestJobEndpoint_Plan_WithDiff(t *testing.T) {
}
func TestJobEndpoint_Plan_NoDiff(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -2437,6 +2476,7 @@ func TestJobEndpoint_Plan_NoDiff(t *testing.T) {
}
func TestJobEndpoint_ImplicitConstraints_Vault(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -2502,6 +2542,7 @@ func TestJobEndpoint_ImplicitConstraints_Vault(t *testing.T) {
}
func TestJobEndpoint_ImplicitConstraints_Signals(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -2560,6 +2601,7 @@ func TestJobEndpoint_ImplicitConstraints_Signals(t *testing.T) {
}
func TestJobEndpoint_ValidateJob_InvalidDriverConf(t *testing.T) {
t.Parallel()
// Create a mock job with an invalid config
job := mock.Job()
job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
@@ -2577,6 +2619,7 @@ func TestJobEndpoint_ValidateJob_InvalidDriverConf(t *testing.T) {
}
func TestJobEndpoint_ValidateJob_InvalidSignals(t *testing.T) {
t.Parallel()
// Create a mock job that wants to send a signal to a driver that can't
job := mock.Job()
job.TaskGroups[0].Tasks[0].Driver = "qemu"
@@ -2597,6 +2640,7 @@ func TestJobEndpoint_ValidateJob_InvalidSignals(t *testing.T) {
}
func TestJobEndpoint_ValidateJobUpdate(t *testing.T) {
t.Parallel()
old := mock.Job()
new := mock.Job()
@@ -2629,6 +2673,7 @@ func TestJobEndpoint_ValidateJobUpdate(t *testing.T) {
}
func TestJobEndpoint_Dispatch(t *testing.T) {
t.Parallel()
// No requirements
d1 := mock.Job()

View File

@@ -16,6 +16,7 @@ import (
)
func TestClientEndpoint_Register(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -56,6 +57,7 @@ func TestClientEndpoint_Register(t *testing.T) {
}
func TestClientEndpoint_Register_NoSecret(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -104,6 +106,7 @@ func TestClientEndpoint_Register_NoSecret(t *testing.T) {
}
func TestClientEndpoint_Register_SecretMismatch(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -131,6 +134,7 @@ func TestClientEndpoint_Register_SecretMismatch(t *testing.T) {
}
func TestClientEndpoint_Deregister(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -175,6 +179,7 @@ func TestClientEndpoint_Deregister(t *testing.T) {
}
func TestClientEndpoint_Deregister_Vault(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -235,6 +240,7 @@ func TestClientEndpoint_Deregister_Vault(t *testing.T) {
}
func TestClientEndpoint_UpdateStatus(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -295,6 +301,7 @@ func TestClientEndpoint_UpdateStatus(t *testing.T) {
}
func TestClientEndpoint_UpdateStatus_Vault(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -352,6 +359,7 @@ func TestClientEndpoint_UpdateStatus_Vault(t *testing.T) {
}
func TestClientEndpoint_Register_GetEvals(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -445,6 +453,7 @@ func TestClientEndpoint_Register_GetEvals(t *testing.T) {
}
func TestClientEndpoint_UpdateStatus_GetEvals(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -527,6 +536,7 @@ func TestClientEndpoint_UpdateStatus_GetEvals(t *testing.T) {
}
func TestClientEndpoint_UpdateStatus_HeartbeatOnly(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
@@ -601,6 +611,7 @@ func TestClientEndpoint_UpdateStatus_HeartbeatOnly(t *testing.T) {
}
func TestClientEndpoint_UpdateDrain(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -648,6 +659,7 @@ func TestClientEndpoint_UpdateDrain(t *testing.T) {
// This test ensures that Nomad marks the client state of allocations which are
// in pending/running state as lost when a node is marked as down.
func TestClientEndpoint_Drain_Down(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -775,6 +787,7 @@ func TestClientEndpoint_Drain_Down(t *testing.T) {
}
func TestClientEndpoint_GetNode(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -833,6 +846,7 @@ func TestClientEndpoint_GetNode(t *testing.T) {
}
func TestClientEndpoint_GetNode_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -934,6 +948,7 @@ func TestClientEndpoint_GetNode_Blocking(t *testing.T) {
}
func TestClientEndpoint_GetAllocs(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -995,6 +1010,7 @@ func TestClientEndpoint_GetAllocs(t *testing.T) {
}
func TestClientEndpoint_GetClientAllocs(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -1066,6 +1082,7 @@ func TestClientEndpoint_GetClientAllocs(t *testing.T) {
}
func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -1158,6 +1175,7 @@ func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) {
}
func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -1249,6 +1267,7 @@ func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) {
}
func TestClientEndpoint_UpdateAlloc(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -1311,6 +1330,7 @@ func TestClientEndpoint_UpdateAlloc(t *testing.T) {
}
func TestClientEndpoint_BatchUpdate(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -1367,6 +1387,7 @@ func TestClientEndpoint_BatchUpdate(t *testing.T) {
}
func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -1443,6 +1464,7 @@ func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) {
}
func TestClientEndpoint_CreateNodeEvals(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -1530,6 +1552,7 @@ func TestClientEndpoint_CreateNodeEvals(t *testing.T) {
}
func TestClientEndpoint_Evaluate(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -1610,6 +1633,7 @@ func TestClientEndpoint_Evaluate(t *testing.T) {
}
func TestClientEndpoint_ListNodes(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -1670,6 +1694,7 @@ func TestClientEndpoint_ListNodes(t *testing.T) {
}
func TestClientEndpoint_ListNodes_Blocking(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -1782,6 +1807,7 @@ func TestClientEndpoint_ListNodes_Blocking(t *testing.T) {
}
func TestBatchFuture(t *testing.T) {
t.Parallel()
bf := NewBatchFuture()
// Async respond to the future
@@ -1809,6 +1835,7 @@ func TestBatchFuture(t *testing.T) {
}
func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -1889,6 +1916,7 @@ func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) {
}
func TestClientEndpoint_DeriveVaultToken(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()
@@ -1980,6 +2008,7 @@ func TestClientEndpoint_DeriveVaultToken(t *testing.T) {
}
func TestClientEndpoint_DeriveVaultToken_VaultError(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
state := s1.fsm.State()

View File

@@ -13,6 +13,7 @@ import (
)
func TestOperator_RaftGetConfiguration(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -54,6 +55,7 @@ func TestOperator_RaftGetConfiguration(t *testing.T) {
}
func TestOperator_RaftRemovePeerByAddress(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)

View File

@@ -11,6 +11,7 @@ import (
)
func TestPeriodicEndpoint_Force(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
@@ -57,6 +58,7 @@ func TestPeriodicEndpoint_Force(t *testing.T) {
}
func TestPeriodicEndpoint_Force_NonPeriodic(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})

View File

@@ -98,6 +98,7 @@ func testPeriodicJob(times ...time.Time) *structs.Job {
}
func TestPeriodicDispatch_Add_NonPeriodic(t *testing.T) {
t.Parallel()
p, _ := testPeriodicDispatcher()
job := mock.Job()
if err := p.Add(job); err != nil {
@@ -111,6 +112,7 @@ func TestPeriodicDispatch_Add_NonPeriodic(t *testing.T) {
}
func TestPeriodicDispatch_Add_Periodic_Parameterized(t *testing.T) {
t.Parallel()
p, _ := testPeriodicDispatcher()
job := mock.PeriodicJob()
job.ParameterizedJob = &structs.ParameterizedJobConfig{}
@@ -125,6 +127,7 @@ func TestPeriodicDispatch_Add_Periodic_Parameterized(t *testing.T) {
}
func TestPeriodicDispatch_Add_UpdateJob(t *testing.T) {
t.Parallel()
p, _ := testPeriodicDispatcher()
job := mock.PeriodicJob()
if err := p.Add(job); err != nil {
@@ -153,6 +156,7 @@ func TestPeriodicDispatch_Add_UpdateJob(t *testing.T) {
}
func TestPeriodicDispatch_Add_RemoveJob(t *testing.T) {
t.Parallel()
p, _ := testPeriodicDispatcher()
job := mock.PeriodicJob()
if err := p.Add(job); err != nil {
@@ -177,6 +181,7 @@ func TestPeriodicDispatch_Add_RemoveJob(t *testing.T) {
}
func TestPeriodicDispatch_Add_TriggersUpdate(t *testing.T) {
t.Parallel()
p, m := testPeriodicDispatcher()
// Create a job that won't be evaluated for a while.
@@ -215,6 +220,7 @@ func TestPeriodicDispatch_Add_TriggersUpdate(t *testing.T) {
}
func TestPeriodicDispatch_Remove_Untracked(t *testing.T) {
t.Parallel()
p, _ := testPeriodicDispatcher()
if err := p.Remove("foo"); err != nil {
t.Fatalf("Remove failed %v; expected a no-op", err)
@@ -222,6 +228,7 @@ func TestPeriodicDispatch_Remove_Untracked(t *testing.T) {
}
func TestPeriodicDispatch_Remove_Tracked(t *testing.T) {
t.Parallel()
p, _ := testPeriodicDispatcher()
job := mock.PeriodicJob()
@@ -245,6 +252,7 @@ func TestPeriodicDispatch_Remove_Tracked(t *testing.T) {
}
func TestPeriodicDispatch_Remove_TriggersUpdate(t *testing.T) {
t.Parallel()
p, _ := testPeriodicDispatcher()
// Create a job that will be evaluated soon.
@@ -270,6 +278,7 @@ func TestPeriodicDispatch_Remove_TriggersUpdate(t *testing.T) {
}
func TestPeriodicDispatch_ForceRun_Untracked(t *testing.T) {
t.Parallel()
p, _ := testPeriodicDispatcher()
if _, err := p.ForceRun("foo"); err == nil {
@@ -278,6 +287,7 @@ func TestPeriodicDispatch_ForceRun_Untracked(t *testing.T) {
}
func TestPeriodicDispatch_ForceRun_Tracked(t *testing.T) {
t.Parallel()
p, m := testPeriodicDispatcher()
// Create a job that won't be evaluated for a while.
@@ -306,6 +316,7 @@ func TestPeriodicDispatch_ForceRun_Tracked(t *testing.T) {
}
func TestPeriodicDispatch_Run_DisallowOverlaps(t *testing.T) {
t.Parallel()
p, m := testPeriodicDispatcher()
// Create a job that will trigger two launches but disallows overlapping.
@@ -335,6 +346,7 @@ func TestPeriodicDispatch_Run_DisallowOverlaps(t *testing.T) {
}
func TestPeriodicDispatch_Run_Multiple(t *testing.T) {
t.Parallel()
p, m := testPeriodicDispatcher()
// Create a job that will be launched twice.
@@ -366,6 +378,7 @@ func TestPeriodicDispatch_Run_Multiple(t *testing.T) {
}
func TestPeriodicDispatch_Run_SameTime(t *testing.T) {
t.Parallel()
p, m := testPeriodicDispatcher()
// Create two jobs that will be launched at the same time.
@@ -402,6 +415,7 @@ func TestPeriodicDispatch_Run_SameTime(t *testing.T) {
// some after each other and some invalid times, and ensures the correct
// behavior.
func TestPeriodicDispatch_Complex(t *testing.T) {
t.Parallel()
p, m := testPeriodicDispatcher()
// Create some jobs launching at different times.
@@ -485,6 +499,7 @@ func shuffle(jobs []*structs.Job) {
}
func TestPeriodicHeap_Order(t *testing.T) {
t.Parallel()
h := NewPeriodicHeap()
j1 := mock.PeriodicJob()
j2 := mock.PeriodicJob()
@@ -522,6 +537,7 @@ func deriveChildJob(parent *structs.Job) *structs.Job {
}
func TestPeriodicDispatch_RunningChildren_NoEvals(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -544,6 +560,7 @@ func TestPeriodicDispatch_RunningChildren_NoEvals(t *testing.T) {
}
func TestPeriodicDispatch_RunningChildren_ActiveEvals(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -579,6 +596,7 @@ func TestPeriodicDispatch_RunningChildren_ActiveEvals(t *testing.T) {
}
func TestPeriodicDispatch_RunningChildren_ActiveAllocs(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)

View File

@@ -8,6 +8,7 @@ import (
)
func TestEvaluatePool(t *testing.T) {
t.Parallel()
state := testStateStore(t)
node := mock.Node()
state.UpsertNode(1000, node)
@@ -40,6 +41,7 @@ func TestEvaluatePool(t *testing.T) {
}
func TestEvaluatePool_Resize(t *testing.T) {
t.Parallel()
pool := NewEvaluatePool(1, 4)
defer pool.Shutdown()
if n := pool.Size(); n != 1 {

View File

@@ -59,6 +59,7 @@ func testRegisterJob(t *testing.T, s *Server, j *structs.Job) {
}
func TestPlanApply_applyPlan(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
testutil.WaitForLeader(t, s1.RPC)
@@ -238,6 +239,7 @@ func TestPlanApply_applyPlan(t *testing.T) {
}
func TestPlanApply_EvalPlan_Simple(t *testing.T) {
t.Parallel()
state := testStateStore(t)
node := mock.Node()
state.UpsertNode(1000, node)
@@ -280,6 +282,7 @@ func TestPlanApply_EvalPlan_Simple(t *testing.T) {
}
func TestPlanApply_EvalPlan_Partial(t *testing.T) {
t.Parallel()
state := testStateStore(t)
node := mock.Node()
state.UpsertNode(1000, node)
@@ -336,6 +339,7 @@ func TestPlanApply_EvalPlan_Partial(t *testing.T) {
}
func TestPlanApply_EvalPlan_Partial_AllAtOnce(t *testing.T) {
t.Parallel()
state := testStateStore(t)
node := mock.Node()
state.UpsertNode(1000, node)
@@ -385,6 +389,7 @@ func TestPlanApply_EvalPlan_Partial_AllAtOnce(t *testing.T) {
}
func TestPlanApply_EvalNodePlan_Simple(t *testing.T) {
t.Parallel()
state := testStateStore(t)
node := mock.Node()
state.UpsertNode(1000, node)
@@ -410,6 +415,7 @@ func TestPlanApply_EvalNodePlan_Simple(t *testing.T) {
}
func TestPlanApply_EvalNodePlan_NodeNotReady(t *testing.T) {
t.Parallel()
state := testStateStore(t)
node := mock.Node()
node.Status = structs.NodeStatusInit
@@ -436,6 +442,7 @@ func TestPlanApply_EvalNodePlan_NodeNotReady(t *testing.T) {
}
func TestPlanApply_EvalNodePlan_NodeDrain(t *testing.T) {
t.Parallel()
state := testStateStore(t)
node := mock.Node()
node.Drain = true
@@ -462,6 +469,7 @@ func TestPlanApply_EvalNodePlan_NodeDrain(t *testing.T) {
}
func TestPlanApply_EvalNodePlan_NodeNotExist(t *testing.T) {
t.Parallel()
state := testStateStore(t)
snap, _ := state.Snapshot()
@@ -486,6 +494,7 @@ func TestPlanApply_EvalNodePlan_NodeNotExist(t *testing.T) {
}
func TestPlanApply_EvalNodePlan_NodeFull(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
state := testStateStore(t)
node := mock.Node()
@@ -520,6 +529,7 @@ func TestPlanApply_EvalNodePlan_NodeFull(t *testing.T) {
}
func TestPlanApply_EvalNodePlan_UpdateExisting(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
state := testStateStore(t)
node := mock.Node()
@@ -549,6 +559,7 @@ func TestPlanApply_EvalNodePlan_UpdateExisting(t *testing.T) {
}
func TestPlanApply_EvalNodePlan_NodeFull_Evict(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
state := testStateStore(t)
node := mock.Node()
@@ -585,6 +596,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_Evict(t *testing.T) {
}
func TestPlanApply_EvalNodePlan_NodeFull_AllocEvict(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
state := testStateStore(t)
node := mock.Node()
@@ -616,6 +628,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_AllocEvict(t *testing.T) {
}
func TestPlanApply_EvalNodePlan_NodeDown_EvictOnly(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
state := testStateStore(t)
node := mock.Node()

View File

@@ -11,6 +11,7 @@ import (
)
func TestPlanEndpoint_Submit(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0
})

View File

@@ -17,6 +17,7 @@ func testPlanQueue(t *testing.T) *PlanQueue {
}
func TestPlanQueue_Enqueue_Dequeue(t *testing.T) {
t.Parallel()
pq := testPlanQueue(t)
if pq.Enabled() {
t.Fatalf("should not be enabled")
@@ -74,6 +75,7 @@ func TestPlanQueue_Enqueue_Dequeue(t *testing.T) {
}
func TestPlanQueue_Enqueue_Disable(t *testing.T) {
t.Parallel()
pq := testPlanQueue(t)
// Enqueue
@@ -104,6 +106,7 @@ func TestPlanQueue_Enqueue_Disable(t *testing.T) {
}
func TestPlanQueue_Dequeue_Timeout(t *testing.T) {
t.Parallel()
pq := testPlanQueue(t)
pq.SetEnabled(true)
@@ -125,6 +128,7 @@ func TestPlanQueue_Dequeue_Timeout(t *testing.T) {
// Ensure higher priority dequeued first
func TestPlanQueue_Dequeue_Priority(t *testing.T) {
t.Parallel()
pq := testPlanQueue(t)
pq.SetEnabled(true)
@@ -158,6 +162,7 @@ func TestPlanQueue_Dequeue_Priority(t *testing.T) {
// Ensure FIFO at fixed priority
func TestPlanQueue_Dequeue_FIFO(t *testing.T) {
t.Parallel()
pq := testPlanQueue(t)
pq.SetEnabled(true)

View File

@@ -10,6 +10,7 @@ import (
)
func TestRegionList(t *testing.T) {
t.Parallel()
// Make the servers
s1 := testServer(t, func(c *Config) {
c.Region = "region1"

View File

@@ -23,6 +23,7 @@ func rpcClient(t *testing.T, s *Server) rpc.ClientCodec {
}
func TestRPC_forwardLeader(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
s2 := testServer(t, func(c *Config) {
@@ -61,6 +62,7 @@ func TestRPC_forwardLeader(t *testing.T) {
}
func TestRPC_forwardRegion(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
s2 := testServer(t, func(c *Config) {

View File

@@ -13,6 +13,7 @@ import (
)
func TestNomad_JoinPeer(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
s2 := testServer(t, func(c *Config) {
@@ -53,6 +54,7 @@ func TestNomad_JoinPeer(t *testing.T) {
}
func TestNomad_RemovePeer(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
s2 := testServer(t, func(c *Config) {
@@ -91,6 +93,7 @@ func TestNomad_RemovePeer(t *testing.T) {
}
func TestNomad_ReapPeer(t *testing.T) {
t.Parallel()
dir := tmpDir(t)
defer os.RemoveAll(dir)
s1 := testServer(t, func(c *Config) {
@@ -179,6 +182,7 @@ func TestNomad_ReapPeer(t *testing.T) {
}
func TestNomad_BootstrapExpect(t *testing.T) {
t.Parallel()
dir := tmpDir(t)
defer os.RemoveAll(dir)
@@ -279,6 +283,7 @@ func TestNomad_BootstrapExpect(t *testing.T) {
}
func TestNomad_BadExpect(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.BootstrapExpect = 2
c.DevDisableBootstrap = true

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"io/ioutil"
"log"
"math/rand"
"net"
"os"
"path"
@@ -18,12 +19,11 @@ import (
)
var (
nextPort uint32 = 15000
nodeNumber uint32 = 0
)
func getPort() int {
return int(atomic.AddUint32(&nextPort, 1))
return 1030 + int(rand.Int31n(6440))
}
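
Replacing the shared atomic counter with a random pick trades determinism for independence: concurrently running test binaries no longer march through the same 15000+ sequence, at the cost of occasional random collisions, which the retry loops added elsewhere in this change absorb. A rough sketch of why callers must still tolerate bind failures (randomPort mirrors getPort above; the rest is illustrative):

package main

import (
	"fmt"
	"math/rand"
	"net"
)

// Same range as getPort above: 1030 + [0, 6440) avoids well-known ports
// but can still collide with another test's pick.
func randomPort() int {
	return 1030 + int(rand.Int31n(6440))
}

func main() {
	for i := 0; i < 10; i++ {
		ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", randomPort()))
		if err != nil {
			continue // collision or port in use: try another random port
		}
		defer ln.Close()
		fmt.Println("bound", ln.Addr())
		return
	}
	fmt.Println("no free port after 10 attempts")
}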
func testLogger() *log.Logger {
@@ -43,16 +43,11 @@ func testServer(t *testing.T, cb func(*Config)) *Server {
config := DefaultConfig()
config.Build = "unittest"
config.DevMode = true
config.RPCAddr = &net.TCPAddr{
IP: []byte{127, 0, 0, 1},
Port: getPort(),
}
nodeNum := atomic.AddUint32(&nodeNumber, 1)
config.NodeName = fmt.Sprintf("nomad-%03d", nodeNum)
// Tighten the Serf timing
config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"
config.SerfConfig.MemberlistConfig.BindPort = getPort()
config.SerfConfig.MemberlistConfig.SuspicionMult = 2
config.SerfConfig.MemberlistConfig.RetransmitMult = 2
config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
@@ -85,12 +80,30 @@ func testServer(t *testing.T, cb func(*Config)) *Server {
logger := log.New(config.LogOutput, fmt.Sprintf("[%s] ", config.NodeName), log.LstdFlags)
catalog := consul.NewMockCatalog(logger)
// Create server
server, err := NewServer(config, catalog, logger)
if err != nil {
t.Fatalf("err: %v", err)
for i := 10; i >= 0; i-- {
// Get random ports
config.RPCAddr = &net.TCPAddr{
IP: []byte{127, 0, 0, 1},
Port: getPort(),
}
config.SerfConfig.MemberlistConfig.BindPort = getPort()
// Create server
server, err := NewServer(config, catalog, logger)
if err == nil {
return server
} else if i == 0 {
t.Fatalf("err: %v", err)
} else {
if server != nil {
server.Shutdown()
}
wait := time.Duration(rand.Int31n(2000)) * time.Millisecond
time.Sleep(wait)
}
}
return server
return nil
}
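
The loop above is a retry-with-jitter idiom: up to eleven attempts, freshly randomized ports each time, shutdown of any half-constructed server, and a random sleep of up to two seconds so concurrent tests do not retry in lockstep and collide again. The same shape, extracted into a hypothetical helper (retryWithJitter is not part of the codebase):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryWithJitter runs fn up to attempts times, sleeping a random
// interval below maxWait between failures.
func retryWithJitter(attempts int, maxWait time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		if i < attempts-1 {
			time.Sleep(time.Duration(rand.Int63n(int64(maxWait))))
		}
	}
	return fmt.Errorf("giving up after %d attempts: %v", attempts, err)
}

func main() {
	err := retryWithJitter(3, 2*time.Second, func() error {
		return fmt.Errorf("bind: address already in use") // stand-in for NewServer failing
	})
	fmt.Println(err)
}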
func testJoin(t *testing.T, s1 *Server, other ...*Server) {
@@ -106,6 +119,7 @@ func testJoin(t *testing.T, s1 *Server, other ...*Server) {
}
func TestServer_RPC(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
@@ -116,6 +130,7 @@ func TestServer_RPC(t *testing.T) {
}
func TestServer_RPC_MixedTLS(t *testing.T) {
t.Parallel()
const (
cafile = "../helper/tlsutil/testdata/ca.pem"
foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
@@ -191,6 +206,7 @@ func TestServer_RPC_MixedTLS(t *testing.T) {
}
func TestServer_Regions(t *testing.T) {
t.Parallel()
// Make the servers
s1 := testServer(t, func(c *Config) {
c.Region = "region1"
@@ -222,6 +238,7 @@ func TestServer_Regions(t *testing.T) {
}
func TestServer_Reload_Vault(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.Region = "region1"
})

View File

@@ -9,6 +9,7 @@ import (
)
func TestStatusVersion(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -39,6 +40,7 @@ func TestStatusVersion(t *testing.T) {
}
func TestStatusPing(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -51,6 +53,7 @@ func TestStatusPing(t *testing.T) {
}
func TestStatusLeader(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -72,6 +75,7 @@ func TestStatusLeader(t *testing.T) {
}
func TestStatusPeers(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)

View File

@@ -13,6 +13,7 @@ import (
)
func TestSystemEndpoint_GarbageCollect(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@@ -62,6 +63,7 @@ func TestSystemEndpoint_GarbageCollect(t *testing.T) {
}
func TestSystemEndpoint_ReconcileSummaries(t *testing.T) {
t.Parallel()
s1 := testServer(t, nil)
defer s1.Shutdown()
codec := rpcClient(t, s1)

View File

@@ -11,6 +11,7 @@ import (
)
func TestTimeTable(t *testing.T) {
t.Parallel()
tt := NewTimeTable(time.Second, time.Minute)
index := tt.NearestIndex(time.Now())
@@ -87,6 +88,7 @@ func TestTimeTable(t *testing.T) {
}
func TestTimeTable_SerializeDeserialize(t *testing.T) {
t.Parallel()
tt := NewTimeTable(time.Second, time.Minute)
// Witness some data
@@ -126,6 +128,7 @@ func TestTimeTable_SerializeDeserialize(t *testing.T) {
}
func TestTimeTable_Overflow(t *testing.T) {
t.Parallel()
tt := NewTimeTable(time.Second, 3*time.Second)
// Witness some data

View File

@@ -10,6 +10,7 @@ import (
)
func TestIsNomadServer(t *testing.T) {
t.Parallel()
m := serf.Member{
Name: "foo",
Addr: net.IP([]byte{127, 0, 0, 1}),
@@ -57,6 +58,7 @@ func TestIsNomadServer(t *testing.T) {
}
func TestShuffleStrings(t *testing.T) {
t.Parallel()
// Generate input
inp := make([]string, 10)
for idx := range inp {
@@ -77,6 +79,7 @@ func TestShuffleStrings(t *testing.T) {
}
func TestMaxUint64(t *testing.T) {
t.Parallel()
if maxUint64(1, 2) != 2 {
t.Fatalf("bad")
}

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"log"
"math/rand"
"os"
"reflect"
"strings"
@@ -13,6 +14,7 @@ import (
"golang.org/x/time/rate"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config"
@@ -145,6 +147,7 @@ func testVaultRoleAndToken(v *testutil.TestVault, t *testing.T, vaultPolicies ma
}
func TestVaultClient_BadConfig(t *testing.T) {
t.Parallel()
conf := &config.VaultConfig{}
logger := log.New(os.Stderr, "", log.LstdFlags)
@@ -168,35 +171,66 @@ func TestVaultClient_BadConfig(t *testing.T) {
}
}
// started separately.
// Test that the Vault Client can establish a connection even if it is started
// before Vault is available.
func TestVaultClient_EstablishConnection(t *testing.T) {
v := testutil.NewTestVault(t)
defer v.Stop()
t.Parallel()
for i := 10; i >= 0; i-- {
v := testutil.NewTestVaultDelayed(t)
logger := log.New(os.Stderr, "", log.LstdFlags)
v.Config.ConnectionRetryIntv = 100 * time.Millisecond
client, err := NewVaultClient(v.Config, logger, nil)
if err != nil {
t.Fatalf("failed to build vault client: %v", err)
}
logger := log.New(os.Stderr, "", log.LstdFlags)
v.Config.ConnectionRetryIntv = 100 * time.Millisecond
client, err := NewVaultClient(v.Config, logger, nil)
if err != nil {
t.Fatalf("failed to build vault client: %v", err)
// Sleep a little while and check that no connection has been established.
time.Sleep(100 * time.Duration(testutil.TestMultiplier()) * time.Millisecond)
if established, _ := client.ConnectionEstablished(); established {
t.Fatalf("ConnectionEstablished() returned true before Vault server started")
}
// Start Vault
if err := v.Start(); err != nil {
v.Stop()
client.Stop()
if i == 0 {
t.Fatalf("Failed to start vault: %v", err)
}
wait := time.Duration(rand.Int31n(2000)) * time.Millisecond
time.Sleep(wait)
continue
}
var waitErr error
testutil.WaitForResult(func() (bool, error) {
return client.ConnectionEstablished()
}, func(err error) {
waitErr = err
})
v.Stop()
client.Stop()
if waitErr != nil {
if i == 0 {
t.Fatalf("Failed to start vault: %v", err)
}
wait := time.Duration(rand.Int31n(2000)) * time.Millisecond
time.Sleep(wait)
continue
}
break
}
defer client.Stop()
// Sleep a little while and check that no connection has been established.
time.Sleep(100 * time.Duration(testutil.TestMultiplier()) * time.Millisecond)
if established, _ := client.ConnectionEstablished(); established {
t.Fatalf("ConnectionEstablished() returned true before Vault server started")
}
// Start Vault
v.Start()
waitForConnection(client, t)
}
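
NewTestVaultDelayed exists so this test can point a client at an address where nothing is listening yet, assert that no connection is reported, and only then start Vault. The same sequence distilled to net/http (all names illustrative; the port-reuse race between Close and the second Listen is ignored here, which a real test would handle by retrying):

package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	// Reserve a free port, then close the listener so nothing serves on it.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	addr := ln.Addr().String()
	ln.Close()

	url := "http://" + addr + "/"
	if _, err := http.Get(url); err == nil {
		fmt.Println("unexpected: connected before the server started")
	}

	// Now start serving on the same address and poll until it answers.
	ln2, err := net.Listen("tcp", addr)
	if err != nil {
		panic(err) // lost the port to another process; a real test retries
	}
	go http.Serve(ln2, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
	for i := 0; i < 50; i++ {
		if resp, err := http.Get(url); err == nil {
			resp.Body.Close()
			fmt.Println("connected after start")
			return
		}
		time.Sleep(20 * time.Millisecond)
	}
	fmt.Println("server never came up")
}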
func TestVaultClient_ValidateRole(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
// Set the configs token in a new test role
@@ -248,7 +282,8 @@ func TestVaultClient_ValidateRole(t *testing.T) {
}
func TestVaultClient_ValidateRole_NonExistant(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
v.Config.Token = defaultTestVaultWhitelistRoleAndToken(v, t, 5)
@@ -287,7 +322,8 @@ func TestVaultClient_ValidateRole_NonExistant(t *testing.T) {
}
func TestVaultClient_ValidateToken(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
// Set the configs token in a new test role
@@ -340,7 +376,8 @@ func TestVaultClient_ValidateToken(t *testing.T) {
}
func TestVaultClient_SetActive(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
logger := log.New(os.Stderr, "", log.LstdFlags)
@@ -369,10 +406,11 @@ func TestVaultClient_SetActive(t *testing.T) {
// Test that we can update the config and things keep working
func TestVaultClient_SetConfig(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
v2 := testutil.NewTestVault(t).Start()
v2 := testutil.NewTestVault(t)
defer v2.Stop()
// Set the configs token in a new test role
@@ -405,7 +443,8 @@ func TestVaultClient_SetConfig(t *testing.T) {
// Test that we can disable vault
func TestVaultClient_SetConfig_Disable(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
logger := log.New(os.Stderr, "", log.LstdFlags)
@@ -438,7 +477,8 @@ func TestVaultClient_SetConfig_Disable(t *testing.T) {
}
func TestVaultClient_RenewalLoop(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
// Set the configs token in a new test role
@@ -494,6 +534,7 @@ func parseTTLFromLookup(s *vapi.Secret, t *testing.T) int64 {
}
func TestVaultClient_LookupToken_Invalid(t *testing.T) {
t.Parallel()
tr := true
conf := &config.VaultConfig{
Enabled: &tr,
@@ -517,7 +558,8 @@ func TestVaultClient_LookupToken_Invalid(t *testing.T) {
}
func TestVaultClient_LookupToken_Root(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
logger := log.New(os.Stderr, "", log.LstdFlags)
@@ -578,7 +620,8 @@ func TestVaultClient_LookupToken_Root(t *testing.T) {
}
func TestVaultClient_LookupToken_Role(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
// Set the configs token in a new test role
@@ -642,7 +685,8 @@ func TestVaultClient_LookupToken_Role(t *testing.T) {
}
func TestVaultClient_LookupToken_RateLimit(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
logger := log.New(os.Stderr, "", log.LstdFlags)
@@ -700,7 +744,8 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) {
}
func TestVaultClient_CreateToken_Root(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
logger := log.New(os.Stderr, "", log.LstdFlags)
@@ -743,7 +788,8 @@ func TestVaultClient_CreateToken_Root(t *testing.T) {
}
func TestVaultClient_CreateToken_Whitelist_Role(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
// Set the configs token in a new test role
@@ -790,7 +836,8 @@ func TestVaultClient_CreateToken_Whitelist_Role(t *testing.T) {
}
func TestVaultClient_CreateToken_Root_Target_Role(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
// Create the test role
@@ -840,6 +887,7 @@ func TestVaultClient_CreateToken_Root_Target_Role(t *testing.T) {
}
func TestVaultClient_CreateToken_Blacklist_Role(t *testing.T) {
t.Parallel()
// Need to skip if the Vault version is 0.6.4
version, err := testutil.VaultVersion()
if err != nil {
@@ -850,7 +898,7 @@ func TestVaultClient_CreateToken_Blacklist_Role(t *testing.T) {
t.Skipf("Vault has a regression in v0.6.4 that this test hits")
}
v := testutil.NewTestVault(t).Start()
v := testutil.NewTestVault(t)
defer v.Stop()
// Set the configs token in a new test role
@@ -898,7 +946,8 @@ func TestVaultClient_CreateToken_Blacklist_Role(t *testing.T) {
}
func TestVaultClient_CreateToken_Role_InvalidToken(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
// Set the configs token in a new test role
@@ -937,7 +986,8 @@ func TestVaultClient_CreateToken_Role_InvalidToken(t *testing.T) {
}
func TestVaultClient_CreateToken_Role_Unrecoverable(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
// Set the configs token in a new test role
@@ -971,11 +1021,15 @@ func TestVaultClient_CreateToken_Role_Unrecoverable(t *testing.T) {
}
func TestVaultClient_CreateToken_Prestart(t *testing.T) {
v := testutil.NewTestVault(t)
defer v.Stop()
t.Parallel()
vconfig := &config.VaultConfig{
Enabled: helper.BoolToPtr(true),
Token: structs.GenerateUUID(),
Addr: "http://127.0.0.1:0",
}
logger := log.New(os.Stderr, "", log.LstdFlags)
client, err := NewVaultClient(v.Config, logger, nil)
client, err := NewVaultClient(vconfig, logger, nil)
if err != nil {
t.Fatalf("failed to build vault client: %v", err)
}
@@ -1000,9 +1054,14 @@ func TestVaultClient_CreateToken_Prestart(t *testing.T) {
}
func TestVaultClient_RevokeTokens_PreEstablishs(t *testing.T) {
v := testutil.NewTestVault(t)
t.Parallel()
vconfig := &config.VaultConfig{
Enabled: helper.BoolToPtr(true),
Token: structs.GenerateUUID(),
Addr: "http://127.0.0.1:0",
}
logger := log.New(os.Stderr, "", log.LstdFlags)
client, err := NewVaultClient(v.Config, logger, nil)
client, err := NewVaultClient(vconfig, logger, nil)
if err != nil {
t.Fatalf("failed to build vault client: %v", err)
}
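
These two tests no longer launch a real Vault at all: a config whose Addr is http://127.0.0.1:0 can never be dialed, which is exactly what tests of pre-establishment behavior need. A one-line check of that assumption (exact error text varies by OS, but the dial fails fast on common platforms):

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Port 0 is not a routable destination for outbound connections, so a
	// client pointed at it stays permanently "not established".
	_, err := net.DialTimeout("tcp", "127.0.0.1:0", time.Second)
	fmt.Println("dial error:", err) // expected to be non-nil
}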
@@ -1039,7 +1098,8 @@ func TestVaultClient_RevokeTokens_PreEstablishs(t *testing.T) {
}
func TestVaultClient_RevokeTokens_Root(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
purged := 0
@@ -1103,7 +1163,8 @@ func TestVaultClient_RevokeTokens_Root(t *testing.T) {
}
func TestVaultClient_RevokeTokens_Role(t *testing.T) {
v := testutil.NewTestVault(t).Start()
t.Parallel()
v := testutil.NewTestVault(t)
defer v.Stop()
// Set the configs token in a new test role

View File

@@ -44,6 +44,7 @@ func init() {
}
func TestWorker_dequeueEvaluation(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0
c.EnabledSchedulers = []string{structs.JobTypeService}
@@ -74,6 +75,7 @@ func TestWorker_dequeueEvaluation(t *testing.T) {
}
func TestWorker_dequeueEvaluation_paused(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0
c.EnabledSchedulers = []string{structs.JobTypeService}
@@ -117,6 +119,7 @@ func TestWorker_dequeueEvaluation_paused(t *testing.T) {
}
func TestWorker_dequeueEvaluation_shutdown(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0
c.EnabledSchedulers = []string{structs.JobTypeService}
@@ -145,6 +148,7 @@ func TestWorker_dequeueEvaluation_shutdown(t *testing.T) {
}
func TestWorker_sendAck(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0
c.EnabledSchedulers = []string{structs.JobTypeService}
@@ -191,6 +195,7 @@ func TestWorker_sendAck(t *testing.T) {
}
func TestWorker_waitForIndex(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0
c.EnabledSchedulers = []string{structs.JobTypeService}
@@ -225,6 +230,7 @@ func TestWorker_waitForIndex(t *testing.T) {
}
func TestWorker_invokeScheduler(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0
c.EnabledSchedulers = []string{structs.JobTypeService}
@@ -242,6 +248,7 @@ func TestWorker_invokeScheduler(t *testing.T) {
}
func TestWorker_SubmitPlan(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0
c.EnabledSchedulers = []string{structs.JobTypeService}
@@ -303,6 +310,7 @@ func TestWorker_SubmitPlan(t *testing.T) {
}
func TestWorker_SubmitPlan_MissingNodeRefresh(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0
c.EnabledSchedulers = []string{structs.JobTypeService}
@@ -366,6 +374,7 @@ func TestWorker_SubmitPlan_MissingNodeRefresh(t *testing.T) {
}
func TestWorker_UpdateEval(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0
c.EnabledSchedulers = []string{structs.JobTypeService}
@@ -412,6 +421,7 @@ func TestWorker_UpdateEval(t *testing.T) {
}
func TestWorker_CreateEval(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0
c.EnabledSchedulers = []string{structs.JobTypeService}
@@ -459,6 +469,7 @@ func TestWorker_CreateEval(t *testing.T) {
}
func TestWorker_ReblockEval(t *testing.T) {
t.Parallel()
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0
c.EnabledSchedulers = []string{structs.JobTypeService}

View File

@@ -5,7 +5,6 @@ import (
"math/rand"
"os"
"os/exec"
"runtime"
"time"
"github.com/hashicorp/nomad/nomad/structs"
@@ -36,17 +35,95 @@ type TestVault struct {
// NewTestVault returns a new TestVault instance that has already been started,
// retrying internally on port conflicts
func NewTestVault(t testing.T) *TestVault {
for i := 10; i >= 0; i-- {
port := getPort()
token := structs.GenerateUUID()
bind := fmt.Sprintf("-dev-listen-address=127.0.0.1:%d", port)
http := fmt.Sprintf("http://127.0.0.1:%d", port)
root := fmt.Sprintf("-dev-root-token-id=%s", token)
cmd := exec.Command("vault", "server", "-dev", bind, root)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
// Build the config
conf := vapi.DefaultConfig()
conf.Address = http
// Make the client and set the token to the root token
client, err := vapi.NewClient(conf)
if err != nil {
t.Fatalf("failed to build Vault API client: %v", err)
}
client.SetToken(token)
enable := true
tv := &TestVault{
cmd: cmd,
t: t,
Addr: bind,
HTTPAddr: http,
RootToken: token,
Client: client,
Config: &config.VaultConfig{
Enabled: &enable,
Token: token,
Addr: http,
},
}
if err := tv.cmd.Start(); err != nil {
tv.t.Fatalf("failed to start vault: %v", err)
}
// Start the waiter
tv.waitCh = make(chan error, 1)
go func() {
err := tv.cmd.Wait()
tv.waitCh <- err
}()
// Ensure Vault started
var startErr error
select {
case startErr = <-tv.waitCh:
case <-time.After(time.Duration(500*TestMultiplier()) * time.Millisecond):
}
if startErr != nil && i == 0 {
t.Fatalf("failed to start vault: %v", startErr)
} else if startErr != nil {
wait := time.Duration(rand.Int31n(2000)) * time.Millisecond
time.Sleep(wait)
continue
}
waitErr := tv.waitForAPI()
if waitErr != nil && i == 0 {
t.Fatalf("failed to start vault: %v", waitErr)
} else if waitErr != nil {
wait := time.Duration(rand.Int31n(2000)) * time.Millisecond
time.Sleep(wait)
continue
}
return tv
}
return nil
}
// NewTestVaultDelayed returns a test Vault server that has not been started.
// Start must be called, and it is the caller's responsibility to handle any
// port conflicts that occur and retry accordingly.
func NewTestVaultDelayed(t testing.T) *TestVault {
port := getPort()
token := structs.GenerateUUID()
bind := fmt.Sprintf("-dev-listen-address=127.0.0.1:%d", port)
http := fmt.Sprintf("http://127.0.0.1:%d", port)
root := fmt.Sprintf("-dev-root-token-id=%s", token)
bin := "vault"
if runtime.GOOS == "windows" {
bin = "vault.exe"
}
cmd := exec.Command(bin, "server", "-dev", bind, root)
cmd := exec.Command("vault", "server", "-dev", bind, root)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
@@ -81,7 +158,7 @@ func NewTestVault(t testing.T) *TestVault {
// Start starts the test Vault server and waits for it to respond to its HTTP
// API
func (tv *TestVault) Start() *TestVault {
func (tv *TestVault) Start() error {
if err := tv.cmd.Start(); err != nil {
tv.t.Fatalf("failed to start vault: %v", err)
}
@@ -96,12 +173,11 @@ func (tv *TestVault) Start() *TestVault {
// Ensure Vault started
select {
case err := <-tv.waitCh:
tv.t.Fatal(err.Error())
return err
case <-time.After(time.Duration(500*TestMultiplier()) * time.Millisecond):
}
tv.waitForAPI()
return tv
return tv.waitForAPI()
}
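
Start moving from t.Fatalf to an error return is what makes the retry loops above possible: a failed launch becomes a recoverable event instead of an immediate test failure. Its start-and-watch shape, as a standalone sketch (startAndWatch is illustrative, not the repository's API):

package main

import (
	"fmt"
	"os/exec"
	"time"
)

// startAndWatch launches a process and waits a bounded grace period for an
// early exit; silence means "probably up", which the caller then confirms
// by polling the process's API.
func startAndWatch(cmd *exec.Cmd, grace time.Duration) error {
	if err := cmd.Start(); err != nil {
		return err
	}
	waitCh := make(chan error, 1)
	go func() { waitCh <- cmd.Wait() }()

	select {
	case err := <-waitCh:
		return fmt.Errorf("process exited during startup: %v", err)
	case <-time.After(grace):
		return nil
	}
}

func main() {
	// "sleep 5" stands in for the vault binary on Unix-like systems.
	err := startAndWatch(exec.Command("sleep", "5"), 500*time.Millisecond)
	fmt.Println("startAndWatch:", err)
}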
// Stop stops the test Vault server
@@ -120,7 +196,8 @@ func (tv *TestVault) Stop() {
// waitForAPI waits for the Vault HTTP endpoint to start
// responding. This is an indication that the agent has started.
func (tv *TestVault) waitForAPI() {
func (tv *TestVault) waitForAPI() error {
var waitErr error
WaitForResult(func() (bool, error) {
inited, err := tv.Client.Sys().InitStatus()
if err != nil {
@@ -128,9 +205,9 @@ func (tv *TestVault) waitForAPI() {
}
return inited, nil
}, func(err error) {
defer tv.Stop()
tv.t.Fatalf("err: %s", err)
waitErr = err
})
return waitErr
}
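
waitForAPI now records the last poll error through the failure callback instead of failing the test, so NewTestVault can treat a dead port as one more reason to retry. A minimal stand-in for the contract used here (waitForResult below is a hypothetical simplification of testutil.WaitForResult):

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForResult polls test until it succeeds or the deadline passes, then
// hands the last error to onFail rather than aborting directly.
func waitForResult(test func() (bool, error), onFail func(error)) {
	deadline := time.Now().Add(2 * time.Second)
	var err error
	for time.Now().Before(deadline) {
		var ok bool
		if ok, err = test(); ok {
			return
		}
		time.Sleep(25 * time.Millisecond)
	}
	onFail(err)
}

func main() {
	var waitErr error
	waitForResult(func() (bool, error) {
		return false, errors.New("not initialized yet")
	}, func(err error) {
		waitErr = err // recorded, not fatal, so the caller can retry
	})
	fmt.Println("waitErr:", waitErr)
}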
func getPort() int {