From cfe6349378af682c006b6f0a0bcbc998a3b7fee9 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Thu, 22 May 2025 09:18:46 -0400 Subject: [PATCH] testing: migrate nomad/state off testify (#25909) We've been gradually migrating from `testify` to `shoenig/test` on a test-by-test basis. While working on a large refactoring in the state store, I found this to create a lot of diffs incidental to the refactoring. In this changeset, I've used a prototype collection of semgrep fix rules to autofix most of the uses of testify in the `nomad/state` package. Then I went in manually and fixed any resulting problems, as well as a few minor test bugs that `shoenig/test` catches and `testify` does not because of its API. I've also added a semgrep rule for marking a package as "testify clean", so that we don't accidentally add it back to any package we manage to remove it from going forward. While I'm here, I've removed most of the uses of `reflect.DeepEqual` in the tests as well as cleaned up some older idioms that Go has nicer syntax for now. 
--- .semgrep/imports.yml | 9 + nomad/state/deployment_events_test.go | 33 +- nomad/state/events_test.go | 338 +- nomad/state/indexer/indexer_test.go | 4 +- nomad/state/indexer/time_test.go | 6 +- nomad/state/schema_test.go | 75 +- nomad/state/state_store_acl_test.go | 193 +- .../state_store_service_registration_test.go | 258 +- nomad/state/state_store_test.go | 4166 ++++++----------- 9 files changed, 2006 insertions(+), 3076 deletions(-) diff --git a/.semgrep/imports.yml b/.semgrep/imports.yml index 451213b38..032e1d5e3 100644 --- a/.semgrep/imports.yml +++ b/.semgrep/imports.yml @@ -20,3 +20,12 @@ rules: paths: include: - "*.go" + + - id: "disallow-new-testify-imports" + pattern: import "github.com/stretchr/testify" + message: "Do not import testify in packages where it has been removed" + languages: [go] + severity: "ERROR" + paths: + include: + - "nomad/state/*_test.go" diff --git a/nomad/state/deployment_events_test.go b/nomad/state/deployment_events_test.go index 2946679fb..0dfec2b15 100644 --- a/nomad/state/deployment_events_test.go +++ b/nomad/state/deployment_events_test.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/stream" "github.com/hashicorp/nomad/nomad/structs" - "github.com/stretchr/testify/require" + "github.com/shoenig/test/must" ) func TestDeploymentEventFromChanges(t *testing.T) { @@ -30,8 +30,8 @@ func TestDeploymentEventFromChanges(t *testing.T) { d := mock.Deployment() d.JobID = j.ID - require.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx)) - require.NoError(t, s.upsertDeploymentImpl(10, d, setupTx)) + must.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx)) + must.NoError(t, s.upsertDeploymentImpl(10, d, setupTx)) setupTx.Txn.Commit() @@ -47,19 +47,18 @@ func TestDeploymentEventFromChanges(t *testing.T) { // Exlude Job and assert its added } - require.NoError(t, s.UpdateDeploymentStatus(msgType, 100, req)) + must.NoError(t, s.UpdateDeploymentStatus(msgType, 100, req)) events 
:= WaitForEvents(t, s, 100, 1, 1*time.Second) - require.Len(t, events, 2) + must.Len(t, 2, events) got := events[0] - require.Equal(t, uint64(100), got.Index) - require.Equal(t, d.ID, got.Key) + must.Eq(t, uint64(100), got.Index) + must.Eq(t, d.ID, got.Key) de := got.Payload.(*structs.DeploymentEvent) - require.Equal(t, structs.DeploymentStatusPaused, de.Deployment.Status) - require.Contains(t, got.FilterKeys, j.ID) - + must.Eq(t, structs.DeploymentStatusPaused, de.Deployment.Status) + must.SliceContains(t, got.FilterKeys, j.ID) } func WaitForEvents(t *testing.T, s *StateStore, index uint64, minEvents int, timeout time.Duration) []structs.Event { @@ -71,7 +70,7 @@ func WaitForEvents(t *testing.T, s *StateStore, index uint64, minEvents int, tim case <-ctx.Done(): return case <-time.After(timeout): - require.Fail(t, "timeout waiting for events") + t.Fatal("timeout waiting for events") } }() @@ -82,16 +81,16 @@ func WaitForEvents(t *testing.T, s *StateStore, index uint64, minEvents int, tim return got } maxAttempts-- - if maxAttempts == 0 { - require.Failf(t, "reached max attempts waiting for desired event count", "count %d got: %+v", len(got), got) - } + must.NotEq(t, 0, maxAttempts, must.Sprintf( + "reached max attempts waiting for desired event count: count %d got: %+v", + len(got), got)) time.Sleep(10 * time.Millisecond) } } func EventsForIndex(t *testing.T, s *StateStore, index uint64) []structs.Event { pub, err := s.EventBroker() - require.NoError(t, err) + must.NoError(t, err) sub, err := pub.Subscribe(&stream.SubscribeRequest{ Topics: map[structs.Topic][]string{ @@ -106,12 +105,12 @@ func EventsForIndex(t *testing.T, s *StateStore, index uint64) []structs.Event { } defer sub.Unsubscribe() - require.NoError(t, err) + must.NoError(t, err) var events []structs.Event for { e, err := sub.NextNoBlock() - require.NoError(t, err) + must.NoError(t, err) if e == nil { break } diff --git a/nomad/state/events_test.go b/nomad/state/events_test.go index 47fc17f75..6aca0d844 
100644 --- a/nomad/state/events_test.go +++ b/nomad/state/events_test.go @@ -14,7 +14,6 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/shoenig/test/must" - "github.com/stretchr/testify/require" ) // TestEventFromChange_SingleEventPerTable ensures that only a single event is @@ -42,8 +41,8 @@ func TestEventFromChange_SingleEventPerTable(t *testing.T) { } out := eventsFromChanges(s.db.ReadTxn(), changes) - require.Len(t, out.Events, 1) - require.Equal(t, out.Events[0].Type, structs.TypeJobRegistered) + must.Len(t, 1, out.Events) + must.Eq(t, out.Events[0].Type, structs.TypeJobRegistered) } func TestEventFromChange_ACLTokenSecretID(t *testing.T) { @@ -52,7 +51,7 @@ func TestEventFromChange_ACLTokenSecretID(t *testing.T) { defer s.StopEventBroker() token := mock.ACLToken() - require.NotEmpty(t, token.SecretID) + must.NotEq(t, "", token.SecretID) // Create changes := Changes{ @@ -68,15 +67,15 @@ func TestEventFromChange_ACLTokenSecretID(t *testing.T) { } out := eventsFromChanges(s.db.ReadTxn(), changes) - require.Len(t, out.Events, 1) + must.Len(t, 1, out.Events) // Ensure original value not altered - require.NotEmpty(t, token.SecretID) + must.NotEq(t, "", token.SecretID) aclTokenEvent, ok := out.Events[0].Payload.(*structs.ACLTokenEvent) - require.True(t, ok) - require.Empty(t, aclTokenEvent.ACLToken.SecretID) + must.True(t, ok) + must.Eq(t, "", aclTokenEvent.ACLToken.SecretID) - require.Equal(t, token.SecretID, aclTokenEvent.SecretID()) + must.Eq(t, token.SecretID, aclTokenEvent.SecretID()) // Delete changes = Changes{ @@ -92,11 +91,11 @@ func TestEventFromChange_ACLTokenSecretID(t *testing.T) { } out2 := eventsFromChanges(s.db.ReadTxn(), changes) - require.Len(t, out2.Events, 1) + must.Len(t, 1, out2.Events) tokenEvent2, ok := out2.Events[0].Payload.(*structs.ACLTokenEvent) - require.True(t, ok) - require.Empty(t, tokenEvent2.ACLToken.SecretID) + must.True(t, ok) + must.Eq(t, "", 
tokenEvent2.ACLToken.SecretID) } func TestEventsFromChanges_DeploymentUpdate(t *testing.T) { @@ -114,8 +113,8 @@ func TestEventsFromChanges_DeploymentUpdate(t *testing.T) { d := mock.Deployment() d.JobID = j.ID - require.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx)) - require.NoError(t, s.upsertDeploymentImpl(10, d, setupTx)) + must.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx)) + must.NoError(t, s.upsertDeploymentImpl(10, d, setupTx)) setupTx.Txn.Commit() @@ -131,18 +130,18 @@ func TestEventsFromChanges_DeploymentUpdate(t *testing.T) { // Exlude Job and assert its added } - require.NoError(t, s.UpdateDeploymentStatus(msgType, 100, req)) + must.NoError(t, s.UpdateDeploymentStatus(msgType, 100, req)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) - require.Len(t, events, 2) + must.Len(t, 2, events) got := events[0] - require.Equal(t, uint64(100), got.Index) - require.Equal(t, d.ID, got.Key) + must.Eq(t, uint64(100), got.Index) + must.Eq(t, d.ID, got.Key) de := got.Payload.(*structs.DeploymentEvent) - require.Equal(t, structs.DeploymentStatusPaused, de.Deployment.Status) - require.Contains(t, got.FilterKeys, j.ID) + must.Eq(t, structs.DeploymentStatusPaused, de.Deployment.Status) + must.SliceContains(t, got.FilterKeys, j.ID) } func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { @@ -158,7 +157,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { tg2 := tg1.Copy() tg2.Name = "foo" j.TaskGroups = append(j.TaskGroups, tg2) - require.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx)) + must.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx)) d := mock.Deployment() d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion @@ -173,7 +172,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { DesiredCanaries: 1, }, } - require.NoError(t, s.upsertDeploymentImpl(10, d, setupTx)) + must.NoError(t, s.upsertDeploymentImpl(10, d, setupTx)) // create set of allocs c1 := mock.Alloc() @@ 
-192,7 +191,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { Healthy: pointer.Of(true), } - require.NoError(t, s.upsertAllocsImpl(10, []*structs.Allocation{c1, c2}, setupTx)) + must.NoError(t, s.upsertAllocsImpl(10, []*structs.Allocation{c1, c2}, setupTx)) // commit setup transaction setupTx.Txn.Commit() @@ -208,18 +207,18 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { Eval: e, } - require.NoError(t, s.UpdateDeploymentPromotion(msgType, 100, req)) + must.NoError(t, s.UpdateDeploymentPromotion(msgType, 100, req)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) - require.Len(t, events, 4) + must.Len(t, 4, events) got := events[0] - require.Equal(t, uint64(100), got.Index) - require.Equal(t, d.ID, got.Key) + must.Eq(t, uint64(100), got.Index) + must.Eq(t, d.ID, got.Key) de := got.Payload.(*structs.DeploymentEvent) - require.Equal(t, structs.DeploymentStatusRunning, de.Deployment.Status) - require.Equal(t, structs.TypeDeploymentPromotion, got.Type) + must.Eq(t, structs.DeploymentStatusRunning, de.Deployment.Status) + must.Eq(t, structs.TypeDeploymentPromotion, got.Type) } func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { @@ -235,7 +234,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { tg2 := tg1.Copy() tg2.Name = "foo" j.TaskGroups = append(j.TaskGroups, tg2) - require.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx)) + must.NoError(t, s.upsertJobImpl(10, nil, j, false, setupTx)) d := mock.Deployment() d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion @@ -250,7 +249,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { DesiredCanaries: 1, }, } - require.NoError(t, s.upsertDeploymentImpl(10, d, setupTx)) + must.NoError(t, s.upsertDeploymentImpl(10, d, setupTx)) // create set of allocs c1 := mock.Alloc() @@ -269,7 +268,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { Healthy: 
pointer.Of(true), } - require.NoError(t, s.upsertAllocsImpl(10, []*structs.Allocation{c1, c2}, setupTx)) + must.NoError(t, s.upsertAllocsImpl(10, []*structs.Allocation{c1, c2}, setupTx)) // Commit setup setupTx.Commit() @@ -287,10 +286,10 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { }, } - require.NoError(t, s.UpdateDeploymentAllocHealth(msgType, 100, req)) + must.NoError(t, s.UpdateDeploymentAllocHealth(msgType, 100, req)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) - require.Len(t, events, 3) + must.Len(t, 3, events) var allocEvents []structs.Event var deploymentEvent []structs.Event @@ -302,19 +301,19 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { } } - require.Len(t, allocEvents, 2) + must.Len(t, 2, allocEvents) for _, e := range allocEvents { - require.Equal(t, 100, int(e.Index)) - require.Equal(t, structs.TypeDeploymentAllocHealth, e.Type) - require.Equal(t, structs.TopicAllocation, e.Topic) + must.Eq(t, 100, int(e.Index)) + must.Eq(t, structs.TypeDeploymentAllocHealth, e.Type) + must.Eq(t, structs.TopicAllocation, e.Topic) } - require.Len(t, deploymentEvent, 1) + must.Len(t, 1, deploymentEvent) for _, e := range deploymentEvent { - require.Equal(t, 100, int(e.Index)) - require.Equal(t, structs.TypeDeploymentAllocHealth, e.Type) - require.Equal(t, structs.TopicDeployment, e.Topic) - require.Equal(t, d.ID, e.Key) + must.Eq(t, 100, int(e.Index)) + must.Eq(t, structs.TypeDeploymentAllocHealth, e.Type) + must.Eq(t, structs.TopicDeployment, e.Topic) + must.Eq(t, d.ID, e.Key) } } @@ -327,8 +326,8 @@ func TestEventsFromChanges_UpsertNodeEventsType(t *testing.T) { n1 := mock.Node() n2 := mock.Node() - require.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, 10, n1)) - require.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, 12, n2)) + must.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, 10, n1)) + must.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, 12, n2)) msgType := 
structs.UpsertNodeEventsType req := &structs.EmitNodeEventsRequest{ @@ -346,15 +345,15 @@ func TestEventsFromChanges_UpsertNodeEventsType(t *testing.T) { }, } - require.NoError(t, s.UpsertNodeEvents(msgType, 100, req.NodeEvents)) + must.NoError(t, s.UpsertNodeEvents(msgType, 100, req.NodeEvents)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) - require.Len(t, events, 2) + must.Len(t, 2, events) for _, e := range events { - require.Equal(t, structs.TopicNode, e.Topic) - require.Equal(t, structs.TypeNodeEvent, e.Type) + must.Eq(t, structs.TopicNode, e.Topic) + must.Eq(t, structs.TypeNodeEvent, e.Type) event := e.Payload.(*structs.NodeStreamEvent) - require.Equal(t, "update", event.Node.Events[len(event.Node.Events)-1].Message) + must.Eq(t, "update", event.Node.Events[len(event.Node.Events)-1].Message) } } @@ -367,7 +366,7 @@ func TestEventsFromChanges_NodeUpdateStatusRequest(t *testing.T) { // setup n1 := mock.Node() - require.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, 10, n1)) + must.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, 10, n1)) updated := time.Now() msgType := structs.NodeUpdateStatusRequestType @@ -378,16 +377,16 @@ func TestEventsFromChanges_NodeUpdateStatusRequest(t *testing.T) { NodeEvent: &structs.NodeEvent{Message: "down"}, } - require.NoError(t, s.UpdateNodeStatus(msgType, 100, req.NodeID, req.Status, req.UpdatedAt, req.NodeEvent)) + must.NoError(t, s.UpdateNodeStatus(msgType, 100, req.NodeID, req.Status, req.UpdatedAt, req.NodeEvent)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) - require.Len(t, events, 1) + must.Len(t, 1, events) e := events[0] - require.Equal(t, structs.TopicNode, e.Topic) - require.Equal(t, structs.TypeNodeEvent, e.Type) + must.Eq(t, structs.TopicNode, e.Topic) + must.Eq(t, structs.TypeNodeEvent, e.Type) event := e.Payload.(*structs.NodeStreamEvent) - require.Equal(t, "down", event.Node.Events[len(event.Node.Events)-1].Message) - require.Equal(t, structs.NodeStatusDown, event.Node.Status) + must.Eq(t, 
"down", event.Node.Events[len(event.Node.Events)-1].Message) + must.Eq(t, structs.NodeStatusDown, event.Node.Status) } func TestEventsFromChanges_NodePoolUpsertRequestType(t *testing.T) { @@ -454,7 +453,7 @@ func TestEventsFromChanges_EvalUpdateRequestType(t *testing.T) { // setup e1 := mock.Eval() - require.NoError(t, s.UpsertEvals(structs.MsgTypeTestSetup, 10, []*structs.Evaluation{e1})) + must.NoError(t, s.UpsertEvals(structs.MsgTypeTestSetup, 10, []*structs.Evaluation{e1})) e2 := mock.Eval() e2.ID = e1.ID @@ -466,18 +465,18 @@ func TestEventsFromChanges_EvalUpdateRequestType(t *testing.T) { Evals: []*structs.Evaluation{e2}, } - require.NoError(t, s.UpsertEvals(msgType, 100, req.Evals)) + must.NoError(t, s.UpsertEvals(msgType, 100, req.Evals)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) - require.Len(t, events, 1) + must.Len(t, 1, events) e := events[0] - require.Equal(t, structs.TopicEvaluation, e.Topic) - require.Equal(t, structs.TypeEvalUpdated, e.Type) - require.Contains(t, e.FilterKeys, e2.JobID) - require.Contains(t, e.FilterKeys, e2.DeploymentID) + must.Eq(t, structs.TopicEvaluation, e.Topic) + must.Eq(t, structs.TypeEvalUpdated, e.Type) + must.SliceContains(t, e.FilterKeys, e2.JobID) + must.SliceContains(t, e.FilterKeys, e2.DeploymentID) event := e.Payload.(*structs.EvaluationEvent) - require.Equal(t, "blocked", event.Evaluation.Status) + must.Eq(t, "blocked", event.Evaluation.Status) } func TestEventsFromChanges_ApplyPlanResultsRequestType(t *testing.T) { @@ -496,13 +495,13 @@ func TestEventsFromChanges_ApplyPlanResultsRequestType(t *testing.T) { alloc.DeploymentID = d.ID alloc2.DeploymentID = d.ID - require.NoError(t, s.UpsertJob(structs.MsgTypeTestSetup, 9, nil, job)) + must.NoError(t, s.UpsertJob(structs.MsgTypeTestSetup, 9, nil, job)) eval := mock.Eval() eval.JobID = job.ID // Create an eval - require.NoError(t, s.UpsertEvals(structs.MsgTypeTestSetup, 10, []*structs.Evaluation{eval})) + must.NoError(t, 
s.UpsertEvals(structs.MsgTypeTestSetup, 10, []*structs.Evaluation{eval})) msgType := structs.ApplyPlanResultsRequestType req := &structs.ApplyPlanResultsRequest{ @@ -514,10 +513,10 @@ func TestEventsFromChanges_ApplyPlanResultsRequestType(t *testing.T) { EvalID: eval.ID, } - require.NoError(t, s.UpsertPlanResults(msgType, 100, req)) + must.NoError(t, s.UpsertPlanResults(msgType, 100, req)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) - require.Len(t, events, 5) + must.Len(t, 5, events) var allocs []structs.Event var evals []structs.Event @@ -533,12 +532,12 @@ func TestEventsFromChanges_ApplyPlanResultsRequestType(t *testing.T) { } else if e.Topic == structs.TopicDeployment { deploys = append(deploys, e) } - require.Equal(t, structs.TypePlanResult, e.Type) + must.Eq(t, structs.TypePlanResult, e.Type) } - require.Len(t, allocs, 2) - require.Len(t, evals, 1) - require.Len(t, jobs, 1) - require.Len(t, deploys, 1) + must.Len(t, 2, allocs) + must.Len(t, 1, evals) + must.Len(t, 1, jobs) + must.Len(t, 1, deploys) } func TestEventsFromChanges_BatchNodeUpdateDrainRequestType(t *testing.T) { @@ -550,8 +549,8 @@ func TestEventsFromChanges_BatchNodeUpdateDrainRequestType(t *testing.T) { n1 := mock.Node() n2 := mock.Node() - require.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, 10, n1)) - require.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, 11, n2)) + must.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, 10, n1)) + must.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, 11, n2)) updated := time.Now() msgType := structs.BatchNodeUpdateDrainRequestType @@ -582,17 +581,17 @@ func TestEventsFromChanges_BatchNodeUpdateDrainRequestType(t *testing.T) { UpdatedAt: updated.UnixNano(), } - require.NoError(t, s.BatchUpdateNodeDrain(msgType, 100, req.UpdatedAt, req.Updates, req.NodeEvents)) + must.NoError(t, s.BatchUpdateNodeDrain(msgType, 100, req.UpdatedAt, req.Updates, req.NodeEvents)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) - require.Len(t, events, 2) + 
must.Len(t, 2, events) for _, e := range events { - require.Equal(t, 100, int(e.Index)) - require.Equal(t, structs.TypeNodeDrain, e.Type) - require.Equal(t, structs.TopicNode, e.Topic) + must.Eq(t, 100, int(e.Index)) + must.Eq(t, structs.TypeNodeDrain, e.Type) + must.Eq(t, structs.TopicNode, e.Topic) ne := e.Payload.(*structs.NodeStreamEvent) - require.Equal(t, event.Message, ne.Node.Events[len(ne.Node.Events)-1].Message) + must.Eq(t, event.Message, ne.Node.Events[len(ne.Node.Events)-1].Message) } } @@ -604,7 +603,7 @@ func TestEventsFromChanges_NodeUpdateEligibilityRequestType(t *testing.T) { // setup n1 := mock.Node() - require.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, 10, n1)) + must.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, 10, n1)) msgType := structs.NodeUpdateEligibilityRequestType @@ -621,18 +620,18 @@ func TestEventsFromChanges_NodeUpdateEligibilityRequestType(t *testing.T) { UpdatedAt: time.Now().UnixNano(), } - require.NoError(t, s.UpdateNodeEligibility(msgType, 100, req.NodeID, req.Eligibility, req.UpdatedAt, req.NodeEvent)) + must.NoError(t, s.UpdateNodeEligibility(msgType, 100, req.NodeID, req.Eligibility, req.UpdatedAt, req.NodeEvent)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) - require.Len(t, events, 1) + must.Len(t, 1, events) for _, e := range events { - require.Equal(t, 100, int(e.Index)) - require.Equal(t, structs.TypeNodeDrain, e.Type) - require.Equal(t, structs.TopicNode, e.Topic) + must.Eq(t, 100, int(e.Index)) + must.Eq(t, structs.TypeNodeDrain, e.Type) + must.Eq(t, structs.TopicNode, e.Topic) ne := e.Payload.(*structs.NodeStreamEvent) - require.Equal(t, event.Message, ne.Node.Events[len(ne.Node.Events)-1].Message) - require.Equal(t, structs.NodeSchedulingIneligible, ne.Node.SchedulingEligibility) + must.Eq(t, event.Message, ne.Node.Events[len(ne.Node.Events)-1].Message) + must.Eq(t, structs.NodeSchedulingIneligible, ne.Node.SchedulingEligibility) } } @@ -643,8 +642,8 @@ func 
TestEventsFromChanges_AllocUpdateDesiredTransitionRequestType(t *testing.T) alloc := mock.Alloc() - require.Nil(t, s.UpsertJob(structs.MsgTypeTestSetup, 10, nil, alloc.Job)) - require.Nil(t, s.UpsertAllocs(structs.MsgTypeTestSetup, 11, []*structs.Allocation{alloc})) + must.Nil(t, s.UpsertJob(structs.MsgTypeTestSetup, 10, nil, alloc.Job)) + must.Nil(t, s.UpsertAllocs(structs.MsgTypeTestSetup, 11, []*structs.Allocation{alloc})) msgType := structs.AllocUpdateDesiredTransitionRequestType @@ -667,10 +666,10 @@ func TestEventsFromChanges_AllocUpdateDesiredTransitionRequestType(t *testing.T) Evals: evals, } - require.NoError(t, s.UpdateAllocsDesiredTransitions(msgType, 100, req.Allocs, req.Evals)) + must.NoError(t, s.UpdateAllocsDesiredTransitions(msgType, 100, req.Allocs, req.Evals)) events := WaitForEvents(t, s, 100, 1, 1*time.Second) - require.Len(t, events, 2) + must.Len(t, 2, events) var allocs []structs.Event var evalEvents []structs.Event @@ -680,14 +679,14 @@ func TestEventsFromChanges_AllocUpdateDesiredTransitionRequestType(t *testing.T) } else if e.Topic == structs.TopicAllocation { allocs = append(allocs, e) } else { - require.Fail(t, "unexpected event type") + t.Fatal("unexpected event type") } - require.Equal(t, structs.TypeAllocationUpdateDesiredStatus, e.Type) + must.Eq(t, structs.TypeAllocationUpdateDesiredStatus, e.Type) } - require.Len(t, allocs, 1) - require.Len(t, evalEvents, 1) + must.Len(t, 1, allocs) + must.Len(t, 1, evalEvents) } func TestEventsFromChanges_JobBatchDeregisterRequestType(t *testing.T) { @@ -724,9 +723,9 @@ func TestEventsFromChanges_WithDeletion(t *testing.T) { } event := eventsFromChanges(nil, changes) - require.NotNil(t, event) + must.NotNil(t, event) - require.Len(t, event.Events, 2) + must.Len(t, 2, event.Events) } func TestEventsFromChanges_WithNodeDeregistration(t *testing.T) { @@ -750,22 +749,22 @@ func TestEventsFromChanges_WithNodeDeregistration(t *testing.T) { } actual := eventsFromChanges(nil, changes) - require.NotNil(t, 
actual) + must.NotNil(t, actual) - require.Len(t, actual.Events, 1) + must.Len(t, 1, actual.Events) event := actual.Events[0] - require.Equal(t, structs.TypeNodeDeregistration, event.Type) - require.Equal(t, uint64(1), event.Index) - require.Equal(t, structs.TopicNode, event.Topic) - require.Equal(t, "some-id", event.Key) + must.Eq(t, structs.TypeNodeDeregistration, event.Type) + must.Eq(t, uint64(1), event.Index) + must.Eq(t, structs.TopicNode, event.Topic) + must.Eq(t, "some-id", event.Key) - require.Len(t, event.FilterKeys, 0) + must.Len(t, 0, event.FilterKeys) nodeEvent, ok := event.Payload.(*structs.NodeStreamEvent) - require.True(t, ok) - require.Equal(t, *before, *nodeEvent.Node) + must.True(t, ok) + must.Eq(t, *before, *nodeEvent.Node) } func TestNodeEventsFromChanges(t *testing.T) { @@ -838,7 +837,7 @@ func TestNodeEventsFromChanges(t *testing.T) { WantTopic: structs.TopicNode, Name: "batch node deregistered", Setup: func(s *StateStore, tx *txn) error { - require.NoError(t, upsertNodeTxn(tx, tx.Index, testNode())) + must.NoError(t, upsertNodeTxn(tx, tx.Index, testNode())) return upsertNodeTxn(tx, tx.Index, testNode(nodeIDTwo)) }, Mutate: func(s *StateStore, tx *txn) error { @@ -870,7 +869,7 @@ func TestNodeEventsFromChanges(t *testing.T) { WantTopic: structs.TopicNode, Name: "batch node events upserted", Setup: func(s *StateStore, tx *txn) error { - require.NoError(t, upsertNodeTxn(tx, tx.Index, testNode())) + must.NoError(t, upsertNodeTxn(tx, tx.Index, testNode())) return upsertNodeTxn(tx, tx.Index, testNode(nodeIDTwo)) }, Mutate: func(s *StateStore, tx *txn) error { @@ -892,7 +891,7 @@ func TestNodeEventsFromChanges(t *testing.T) { }, } } - require.NoError(t, s.upsertNodeEvents(tx.Index, testNodeID(), eventFn(testNodeID()), tx)) + must.NoError(t, s.upsertNodeEvents(tx.Index, testNodeID(), eventFn(testNodeID()), tx)) return s.upsertNodeEvents(tx.Index, testNodeIDTwo(), eventFn(testNodeIDTwo()), tx) }, WantEvents: []structs.Event{ @@ -926,26 +925,26 @@ 
func TestNodeEventsFromChanges(t *testing.T) { if tc.Setup != nil { // Bypass publish mechanism for setup setupTx := s.db.WriteTxn(10) - require.NoError(t, tc.Setup(s, setupTx)) + must.NoError(t, tc.Setup(s, setupTx)) setupTx.Txn.Commit() } tx := s.db.WriteTxnMsgT(tc.MsgType, 100) - require.NoError(t, tc.Mutate(s, tx)) + must.NoError(t, tc.Mutate(s, tx)) changes := Changes{Changes: tx.Changes(), Index: 100, MsgType: tc.MsgType} got := eventsFromChanges(tx, changes) - require.NotNil(t, got) + must.NotNil(t, got) - require.Equal(t, len(tc.WantEvents), len(got.Events)) + must.Eq(t, len(tc.WantEvents), len(got.Events)) for idx, g := range got.Events { // assert equality of shared fields want := tc.WantEvents[idx] - require.Equal(t, want.Index, g.Index) - require.Equal(t, want.Key, g.Key) - require.Equal(t, want.Topic, g.Topic) + must.Eq(t, want.Index, g.Index) + must.Eq(t, want.Key, g.Key) + must.Eq(t, want.Topic, g.Topic) switch tc.MsgType { case structs.NodeRegisterRequestType: @@ -953,9 +952,9 @@ func TestNodeEventsFromChanges(t *testing.T) { case structs.NodeDeregisterRequestType: requireNodeDeregistrationEventEqual(t, tc.WantEvents[idx], g) case structs.UpsertNodeEventsType: - requireNodeEventEqual(t, tc.WantEvents[idx], g) + requireNodeEventCount(t, 3, g) default: - require.Fail(t, "unhandled message type") + t.Fatal("unhandled message type") } } }) @@ -976,8 +975,8 @@ func TestNodeDrainEventFromChanges(t *testing.T) { alloc1.NodeID = node.ID alloc2.NodeID = node.ID - require.NoError(t, upsertNodeTxn(setupTx, 10, node)) - require.NoError(t, s.upsertAllocsImpl(100, []*structs.Allocation{alloc1, alloc2}, setupTx)) + must.NoError(t, upsertNodeTxn(setupTx, 10, node)) + must.NoError(t, s.upsertAllocsImpl(100, []*structs.Allocation{alloc1, alloc2}, setupTx)) setupTx.Txn.Commit() // changes @@ -994,21 +993,21 @@ func TestNodeDrainEventFromChanges(t *testing.T) { updatedAt := time.Now() event := &structs.NodeEvent{} - require.NoError(t, s.updateNodeDrainImpl(tx, 100, 
node.ID, strat, markEligible, updatedAt.UnixNano(), event, nil, "", false)) + must.NoError(t, s.updateNodeDrainImpl(tx, 100, node.ID, strat, markEligible, updatedAt.UnixNano(), event, nil, "", false)) changes := Changes{Changes: tx.Changes(), Index: 100, MsgType: structs.NodeUpdateDrainRequestType} got := eventsFromChanges(tx, changes) - require.Len(t, got.Events, 1) + must.Len(t, 1, got.Events) - require.Equal(t, structs.TopicNode, got.Events[0].Topic) - require.Equal(t, structs.TypeNodeDrain, got.Events[0].Type) - require.Equal(t, uint64(100), got.Events[0].Index) + must.Eq(t, structs.TopicNode, got.Events[0].Topic) + must.Eq(t, structs.TypeNodeDrain, got.Events[0].Type) + must.Eq(t, uint64(100), got.Events[0].Index) nodeEvent, ok := got.Events[0].Payload.(*structs.NodeStreamEvent) - require.True(t, ok) + must.True(t, ok) - require.Equal(t, structs.NodeSchedulingIneligible, nodeEvent.Node.SchedulingEligibility) - require.Equal(t, strat, nodeEvent.Node.DrainStrategy) + must.Eq(t, structs.NodeSchedulingIneligible, nodeEvent.Node.SchedulingEligibility) + must.Eq(t, strat, nodeEvent.Node.DrainStrategy) } func Test_eventsFromChanges_ServiceRegistration(t *testing.T) { @@ -1022,8 +1021,8 @@ func Test_eventsFromChanges_ServiceRegistration(t *testing.T) { // Upsert a service registration. writeTxn := testState.db.WriteTxn(10) updated, err := testState.upsertServiceRegistrationTxn(10, writeTxn, service) - require.True(t, updated) - require.NoError(t, err) + must.True(t, updated) + must.NoError(t, err) writeTxn.Txn.Commit() // Pull the events from the stream. @@ -1031,17 +1030,17 @@ func Test_eventsFromChanges_ServiceRegistration(t *testing.T) { receivedChange := eventsFromChanges(writeTxn, registerChange) // Check the event, and it's payload are what we are expecting. 
- require.Len(t, receivedChange.Events, 1) - require.Equal(t, structs.TopicService, receivedChange.Events[0].Topic) - require.Equal(t, structs.TypeServiceRegistration, receivedChange.Events[0].Type) - require.Equal(t, uint64(10), receivedChange.Events[0].Index) + must.Len(t, 1, receivedChange.Events) + must.Eq(t, structs.TopicService, receivedChange.Events[0].Topic) + must.Eq(t, structs.TypeServiceRegistration, receivedChange.Events[0].Type) + must.Eq(t, uint64(10), receivedChange.Events[0].Index) eventPayload := receivedChange.Events[0].Payload.(*structs.ServiceRegistrationStreamEvent) - require.Equal(t, service, eventPayload.Service) + must.Eq(t, service, eventPayload.Service) // Delete the previously upserted service registration. deleteTxn := testState.db.WriteTxn(20) - require.NoError(t, testState.deleteServiceRegistrationByIDTxn(uint64(20), deleteTxn, service.Namespace, service.ID)) + must.NoError(t, testState.deleteServiceRegistrationByIDTxn(uint64(20), deleteTxn, service.Namespace, service.ID)) writeTxn.Txn.Commit() // Pull the events from the stream. @@ -1049,13 +1048,13 @@ func Test_eventsFromChanges_ServiceRegistration(t *testing.T) { receivedDeleteChange := eventsFromChanges(deleteTxn, deregisterChange) // Check the event, and it's payload are what we are expecting. 
- require.Len(t, receivedDeleteChange.Events, 1) - require.Equal(t, structs.TopicService, receivedDeleteChange.Events[0].Topic) - require.Equal(t, structs.TypeServiceDeregistration, receivedDeleteChange.Events[0].Type) - require.Equal(t, uint64(20), receivedDeleteChange.Events[0].Index) + must.Len(t, 1, receivedDeleteChange.Events) + must.Eq(t, structs.TopicService, receivedDeleteChange.Events[0].Topic) + must.Eq(t, structs.TypeServiceDeregistration, receivedDeleteChange.Events[0].Type) + must.Eq(t, uint64(20), receivedDeleteChange.Events[0].Index) eventPayload = receivedChange.Events[0].Payload.(*structs.ServiceRegistrationStreamEvent) - require.Equal(t, service, eventPayload.Service) + must.Eq(t, service, eventPayload.Service) } func Test_eventsFromChanges_ACLRole(t *testing.T) { @@ -1070,8 +1069,8 @@ func Test_eventsFromChanges_ACLRole(t *testing.T) { // linked policies exist. writeTxn := testState.db.WriteTxn(10) updated, err := testState.upsertACLRoleTxn(10, writeTxn, aclRole, true) - require.True(t, updated) - require.NoError(t, err) + must.True(t, updated) + must.NoError(t, err) writeTxn.Txn.Commit() // Pull the events from the stream. @@ -1079,20 +1078,20 @@ func Test_eventsFromChanges_ACLRole(t *testing.T) { receivedChange := eventsFromChanges(writeTxn, upsertChange) // Check the event, and it's payload are what we are expecting. 
- require.Len(t, receivedChange.Events, 1) - require.Equal(t, structs.TopicACLRole, receivedChange.Events[0].Topic) - require.Equal(t, aclRole.ID, receivedChange.Events[0].Key) - require.Equal(t, aclRole.Name, receivedChange.Events[0].FilterKeys[0]) - require.Equal(t, structs.TypeACLRoleUpserted, receivedChange.Events[0].Type) - require.Equal(t, uint64(10), receivedChange.Events[0].Index) + must.Len(t, 1, receivedChange.Events) + must.Eq(t, structs.TopicACLRole, receivedChange.Events[0].Topic) + must.Eq(t, aclRole.ID, receivedChange.Events[0].Key) + must.Eq(t, aclRole.Name, receivedChange.Events[0].FilterKeys[0]) + must.Eq(t, structs.TypeACLRoleUpserted, receivedChange.Events[0].Type) + must.Eq(t, uint64(10), receivedChange.Events[0].Index) eventPayload := receivedChange.Events[0].Payload.(*structs.ACLRoleStreamEvent) - require.Equal(t, aclRole, eventPayload.ACLRole) + must.Eq(t, aclRole, eventPayload.ACLRole) // Delete the previously upserted ACL role. deleteTxn := testState.db.WriteTxn(20) - require.NoError(t, testState.deleteACLRoleByIDTxn(deleteTxn, aclRole.ID)) - require.NoError(t, deleteTxn.Insert(tableIndex, &IndexEntry{TableACLRoles, 20})) + must.NoError(t, testState.deleteACLRoleByIDTxn(deleteTxn, aclRole.ID)) + must.NoError(t, deleteTxn.Insert(tableIndex, &IndexEntry{TableACLRoles, 20})) deleteTxn.Txn.Commit() // Pull the events from the stream. @@ -1100,15 +1099,15 @@ func Test_eventsFromChanges_ACLRole(t *testing.T) { receivedDeleteChange := eventsFromChanges(deleteTxn, deleteChange) // Check the event, and it's payload are what we are expecting. 
- require.Len(t, receivedDeleteChange.Events, 1) - require.Equal(t, structs.TopicACLRole, receivedDeleteChange.Events[0].Topic) - require.Equal(t, aclRole.ID, receivedDeleteChange.Events[0].Key) - require.Equal(t, aclRole.Name, receivedDeleteChange.Events[0].FilterKeys[0]) - require.Equal(t, structs.TypeACLRoleDeleted, receivedDeleteChange.Events[0].Type) - require.Equal(t, uint64(20), receivedDeleteChange.Events[0].Index) + must.Len(t, 1, receivedDeleteChange.Events) + must.Eq(t, structs.TopicACLRole, receivedDeleteChange.Events[0].Topic) + must.Eq(t, aclRole.ID, receivedDeleteChange.Events[0].Key) + must.Eq(t, aclRole.Name, receivedDeleteChange.Events[0].FilterKeys[0]) + must.Eq(t, structs.TypeACLRoleDeleted, receivedDeleteChange.Events[0].Type) + must.Eq(t, uint64(20), receivedDeleteChange.Events[0].Index) eventPayload = receivedChange.Events[0].Payload.(*structs.ACLRoleStreamEvent) - require.Equal(t, aclRole, eventPayload.ACLRole) + must.Eq(t, aclRole, eventPayload.ACLRole) } func Test_eventsFromChanges_ACLAuthMethod(t *testing.T) { @@ -1342,9 +1341,9 @@ func requireNodeRegistrationEventEqual(t *testing.T, want, got structs.Event) { gotPayload := got.Payload.(*structs.NodeStreamEvent) // Check payload equality for the fields that we can easily control - require.Equal(t, wantPayload.Node.Status, gotPayload.Node.Status) - require.Equal(t, wantPayload.Node.ID, gotPayload.Node.ID) - require.NotEqual(t, wantPayload.Node.Events, gotPayload.Node.Events) + must.Eq(t, wantPayload.Node.Status, gotPayload.Node.Status) + must.Eq(t, wantPayload.Node.ID, gotPayload.Node.ID) + must.NotEq(t, wantPayload.Node.Events, gotPayload.Node.Events) } func requireNodeDeregistrationEventEqual(t *testing.T, want, got structs.Event) { @@ -1353,14 +1352,13 @@ func requireNodeDeregistrationEventEqual(t *testing.T, want, got structs.Event) wantPayload := want.Payload.(*structs.NodeStreamEvent) gotPayload := got.Payload.(*structs.NodeStreamEvent) - require.Equal(t, wantPayload.Node.ID, 
gotPayload.Node.ID) - require.NotEqual(t, wantPayload.Node.Events, gotPayload.Node.Events) + must.Eq(t, wantPayload.Node.ID, gotPayload.Node.ID) + must.NotEq(t, wantPayload.Node.Events, gotPayload.Node.Events) } -func requireNodeEventEqual(t *testing.T, want, got structs.Event) { +func requireNodeEventCount(t *testing.T, want int, got structs.Event) { gotPayload := got.Payload.(*structs.NodeStreamEvent) - - require.Len(t, gotPayload.Node.Events, 3) + must.Len(t, want, gotPayload.Node.Events) } type nodeOpts func(n *structs.Node) diff --git a/nomad/state/indexer/indexer_test.go b/nomad/state/indexer/indexer_test.go index 81c790205..a8d739b2c 100644 --- a/nomad/state/indexer/indexer_test.go +++ b/nomad/state/indexer/indexer_test.go @@ -7,12 +7,12 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" + "github.com/shoenig/test/must" ) func Test_IndexBuilder_Time(t *testing.T) { builder := &IndexBuilder{} testTime := time.Date(1987, time.April, 13, 8, 3, 0, 0, time.UTC) builder.Time(testTime) - require.Equal(t, []byte{0, 0, 0, 0, 32, 128, 155, 180}, builder.Bytes()) + must.Eq(t, []byte{0, 0, 0, 0, 32, 128, 155, 180}, builder.Bytes()) } diff --git a/nomad/state/indexer/time_test.go b/nomad/state/indexer/time_test.go index 2458c947c..2b110fbaa 100644 --- a/nomad/state/indexer/time_test.go +++ b/nomad/state/indexer/time_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/stretchr/testify/require" + "github.com/shoenig/test/must" ) func Test_IndexFromTimeQuery(t *testing.T) { @@ -41,8 +41,8 @@ func Test_IndexFromTimeQuery(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { actualOutput, actualError := IndexFromTimeQuery(tc.inputArg) - require.Equal(t, tc.expectedOutputError, actualError) - require.Equal(t, tc.expectedOutputBytes, actualOutput) + must.Eq(t, tc.expectedOutputError, actualError) + must.Eq(t, tc.expectedOutputBytes, actualOutput) }) } } diff --git a/nomad/state/schema_test.go 
b/nomad/state/schema_test.go index 47415740f..5ea799283 100644 --- a/nomad/state/schema_test.go +++ b/nomad/state/schema_test.go @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/shoenig/test/must" - "github.com/stretchr/testify/require" ) func TestStateStoreSchema(t *testing.T) { @@ -28,8 +27,6 @@ func TestStateStoreSchema(t *testing.T) { func TestState_singleRecord(t *testing.T) { ci.Parallel(t) - require := require.New(t) - const ( singletonTable = "cluster_meta" singletonIDIdx = "id" @@ -40,7 +37,7 @@ func TestState_singleRecord(t *testing.T) { singletonTable: clusterMetaTableSchema(), }, }) - require.NoError(err) + must.NoError(t, err) // numRecords in table counts all the items in the table, which is expected // to always be 1 since that's the point of the singletonRecord Indexer. @@ -49,7 +46,7 @@ func TestState_singleRecord(t *testing.T) { defer txn.Abort() iter, err := txn.Get(singletonTable, singletonIDIdx) - require.NoError(err) + must.NoError(t, err) num := 0 for item := iter.Next(); item != nil; item = iter.Next() { @@ -64,7 +61,7 @@ func TestState_singleRecord(t *testing.T) { setSingleton := func(s string) { txn := db.Txn(true) err := txn.Insert(singletonTable, s) - require.NoError(err) + must.NoError(t, err) txn.Commit() } @@ -75,9 +72,9 @@ func TestState_singleRecord(t *testing.T) { txn := db.Txn(false) defer txn.Abort() record, err := txn.First(singletonTable, singletonIDIdx) - require.NoError(err) + must.NoError(t, err) s, ok := record.(string) - require.True(ok) + must.True(t, ok) return s } @@ -85,16 +82,16 @@ func TestState_singleRecord(t *testing.T) { // a single "singleton" record existing in the table. 
setSingleton("one") - require.Equal(1, numRecordsInTable()) - require.Equal("one", first()) + must.Eq(t, 1, numRecordsInTable()) + must.Eq(t, "one", first()) setSingleton("two") - require.Equal(1, numRecordsInTable()) - require.Equal("two", first()) + must.Eq(t, 1, numRecordsInTable()) + must.Eq(t, "two", first()) setSingleton("three") - require.Equal(1, numRecordsInTable()) - require.Equal("three", first()) + must.Eq(t, 1, numRecordsInTable()) + must.Eq(t, "three", first()) } func Test_jobIsGCable(t *testing.T) { @@ -270,8 +267,6 @@ func Test_jobIsGCable(t *testing.T) { func TestState_ScalingPolicyTargetFieldIndex_FromObject(t *testing.T) { ci.Parallel(t) - require := require.New(t) - policy := mock.ScalingPolicy() policy.Target["TestField"] = "test" @@ -281,49 +276,49 @@ func TestState_ScalingPolicyTargetFieldIndex_FromObject(t *testing.T) { // Check if box indexers can find the test field ok, val, err := indexersAllowMissingTrue.FromObject(policy) - require.True(ok) - require.NoError(err) - require.Equal("test\x00", string(val)) + must.True(t, ok) + must.NoError(t, err) + must.Eq(t, "test\x00", string(val)) ok, val, err = indexersAllowMissingFalse.FromObject(policy) - require.True(ok) - require.NoError(err) - require.Equal("test\x00", string(val)) + must.True(t, ok) + must.NoError(t, err) + must.Eq(t, "test\x00", string(val)) // Check for empty field policy.Target["TestField"] = "" ok, val, err = indexersAllowMissingTrue.FromObject(policy) - require.True(ok) - require.NoError(err) - require.Equal("\x00", string(val)) + must.True(t, ok) + must.NoError(t, err) + must.Eq(t, "\x00", string(val)) ok, val, err = indexersAllowMissingFalse.FromObject(policy) - require.True(ok) - require.NoError(err) - require.Equal("\x00", string(val)) + must.True(t, ok) + must.NoError(t, err) + must.Eq(t, "\x00", string(val)) // Check for missing field delete(policy.Target, "TestField") ok, val, err = indexersAllowMissingTrue.FromObject(policy) - require.True(ok) - require.NoError(err) 
- require.Equal("\x00", string(val)) + must.True(t, ok) + must.NoError(t, err) + must.Eq(t, "\x00", string(val)) ok, val, err = indexersAllowMissingFalse.FromObject(policy) - require.False(ok) - require.NoError(err) - require.Equal("", string(val)) + must.False(t, ok) + must.NoError(t, err) + must.Eq(t, "", string(val)) // Check for invalid input ok, val, err = indexersAllowMissingTrue.FromObject("not-a-scaling-policy") - require.False(ok) - require.Error(err) - require.Equal("", string(val)) + must.False(t, ok) + must.Error(t, err) + must.Eq(t, "", string(val)) ok, val, err = indexersAllowMissingFalse.FromObject("not-a-scaling-policy") - require.False(ok) - require.Error(err) - require.Equal("", string(val)) + must.False(t, ok) + must.Error(t, err) + must.Eq(t, "", string(val)) } diff --git a/nomad/state/state_store_acl_test.go b/nomad/state/state_store_acl_test.go index e7fce7ffb..fdbe227f2 100644 --- a/nomad/state/state_store_acl_test.go +++ b/nomad/state/state_store_acl_test.go @@ -15,7 +15,6 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/shoenig/test/must" - "github.com/stretchr/testify/require" ) func TestStateStore_ACLTokensByExpired(t *testing.T) { @@ -46,17 +45,17 @@ func TestStateStore_ACLTokensByExpired(t *testing.T) { // the state. err := testState.UpsertACLTokens(structs.MsgTypeTestSetup, 10, []*structs.ACLToken{ neverExpireLocalToken, neverExpireGlobalToken}) - require.NoError(t, err) + must.NoError(t, err) iter, err := testState.ACLTokensByExpired(true) - require.NoError(t, err) + must.NoError(t, err) tokens := fromIteratorFunc(iter) - require.Len(t, tokens, 0) + must.Len(t, 0, tokens) iter, err = testState.ACLTokensByExpired(false) - require.NoError(t, err) + must.NoError(t, err) tokens = fromIteratorFunc(iter) - require.Len(t, tokens, 0) + must.Len(t, 0, tokens) // Generate, upsert, and test an expired local token. 
This token expired // long ago and therefore before all others coming in the tests. It should @@ -65,13 +64,13 @@ func TestStateStore_ACLTokensByExpired(t *testing.T) { expiredLocalToken.ExpirationTime = pointer.Of(expiryTimeThreshold.Add(-48 * time.Hour)) err = testState.UpsertACLTokens(structs.MsgTypeTestSetup, 20, []*structs.ACLToken{expiredLocalToken}) - require.NoError(t, err) + must.NoError(t, err) iter, err = testState.ACLTokensByExpired(false) - require.NoError(t, err) + must.NoError(t, err) tokens = fromIteratorFunc(iter) - require.Len(t, tokens, 1) - require.Equal(t, expiredLocalToken.AccessorID, tokens[0].AccessorID) + must.Len(t, 1, tokens) + must.Eq(t, expiredLocalToken.AccessorID, tokens[0].AccessorID) // Generate, upsert, and test an expired global token. This token expired // long ago and therefore before all others coming in the tests. It should @@ -81,13 +80,13 @@ func TestStateStore_ACLTokensByExpired(t *testing.T) { expiredGlobalToken.ExpirationTime = pointer.Of(expiryTimeThreshold.Add(-48 * time.Hour)) err = testState.UpsertACLTokens(structs.MsgTypeTestSetup, 30, []*structs.ACLToken{expiredGlobalToken}) - require.NoError(t, err) + must.NoError(t, err) iter, err = testState.ACLTokensByExpired(true) - require.NoError(t, err) + must.NoError(t, err) tokens = fromIteratorFunc(iter) - require.Len(t, tokens, 1) - require.Equal(t, expiredGlobalToken.AccessorID, tokens[0].AccessorID) + must.Len(t, 1, tokens) + must.Eq(t, expiredGlobalToken.AccessorID, tokens[0].AccessorID) // This test function allows us to run the same test for local and global // tokens. @@ -113,16 +112,16 @@ func TestStateStore_ACLTokensByExpired(t *testing.T) { } err = testState.UpsertACLTokens(structs.MsgTypeTestSetup, 40, mixedTokens) - require.NoError(t, err) + must.NoError(t, err) // Check the full listing works as expected as the first 11 elements // should all be our expired tokens. Ensure our oldest expired token is // first in the list. 
iter, err = testState.ACLTokensByExpired(global) - require.NoError(t, err) + must.NoError(t, err) tokens = fromIteratorFunc(iter) - require.ElementsMatch(t, expiredTokens, tokens[:11]) - require.Equal(t, tokens[0], oldToken) + must.SliceContainsAll(t, expiredTokens, tokens[:11]) + must.Eq(t, tokens[0], oldToken) } testFn(expiredLocalToken, false) @@ -150,7 +149,7 @@ func Test_expiresIndexName(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { actualOutput := expiresIndexName(tc.globalInput) - require.Equal(t, tc.expectedOutput, actualOutput) + must.Eq(t, tc.expectedOutput, actualOutput) }) } } @@ -164,7 +163,7 @@ func TestStateStore_UpsertACLRoles(t *testing.T) { // exist. mockedACLRoles := []*structs.ACLRole{mock.ACLRole()} err := testState.UpsertACLRoles(structs.MsgTypeTestSetup, 10, mockedACLRoles, false) - require.ErrorContains(t, err, "policy not found") + must.ErrorContains(t, err, "policy not found") // Create the policies our ACL roles wants to link to and then try the // upsert again. @@ -173,20 +172,20 @@ func TestStateStore_UpsertACLRoles(t *testing.T) { policy2 := mock.ACLPolicy() policy2.Name = "mocked-test-policy-2" - require.NoError(t, testState.UpsertACLPolicies( + must.NoError(t, testState.UpsertACLPolicies( structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{policy1, policy2})) - require.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 20, mockedACLRoles, false)) + must.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 20, mockedACLRoles, false)) // Check that the index for the table was modified as expected. initialIndex, err := testState.Index(TableACLRoles) - require.NoError(t, err) + must.NoError(t, err) must.Eq(t, 20, initialIndex) // List all the ACL roles in the table, so we can perform a number of tests // on the return array. 
ws := memdb.NewWatchSet() iter, err := testState.GetACLRoles(ws) - require.NoError(t, err) + must.NoError(t, err) // Count how many table entries we have, to ensure it is the expected // number. @@ -200,13 +199,13 @@ func TestStateStore_UpsertACLRoles(t *testing.T) { must.Eq(t, 20, aclRole.CreateIndex) must.Eq(t, 20, aclRole.ModifyIndex) } - require.Equal(t, 1, count, "incorrect number of ACL roles found") + must.Eq(t, 1, count, must.Sprint("incorrect number of ACL roles found")) // Try writing the same ACL roles to state which should not result in an // update to the table index. - require.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 30, mockedACLRoles, false)) + must.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 30, mockedACLRoles, false)) reInsertActualIndex, err := testState.Index(TableACLRoles) - require.NoError(t, err) + must.NoError(t, err) must.Eq(t, 20, reInsertActualIndex) // Make a change to one of the ACL roles and ensure this update is accepted @@ -214,17 +213,17 @@ func TestStateStore_UpsertACLRoles(t *testing.T) { updatedMockedACLRole := mockedACLRoles[0].Copy() updatedMockedACLRole.Policies = []*structs.ACLRolePolicyLink{{Name: "mocked-test-policy-1"}} updatedMockedACLRole.SetHash() - require.NoError(t, testState.UpsertACLRoles( + must.NoError(t, testState.UpsertACLRoles( structs.MsgTypeTestSetup, 30, []*structs.ACLRole{updatedMockedACLRole}, false)) // Check that the index for the table was modified as expected. updatedIndex, err := testState.Index(TableACLRoles) - require.NoError(t, err) + must.NoError(t, err) must.Eq(t, 30, updatedIndex) // List the ACL roles in state. iter, err = testState.GetACLRoles(ws) - require.NoError(t, err) + must.NoError(t, err) // Count how many table entries we have, to ensure it is the expected // number. 
@@ -238,17 +237,17 @@ func TestStateStore_UpsertACLRoles(t *testing.T) { must.Eq(t, 20, aclRole.CreateIndex) must.Eq(t, 30, aclRole.ModifyIndex) } - require.Equal(t, 1, count, "incorrect number of ACL roles found") + must.Eq(t, 1, count, must.Sprint("incorrect number of ACL roles found")) // Now try inserting an ACL role using the missing policies' argument to // simulate replication. replicatedACLRole := mock.ACLRole() replicatedACLRole.Policies = []*structs.ACLRolePolicyLink{{Name: "nope"}} - require.NoError(t, testState.UpsertACLRoles( + must.NoError(t, testState.UpsertACLRoles( structs.MsgTypeTestSetup, 40, []*structs.ACLRole{replicatedACLRole}, true)) replicatedACLRoleResp, err := testState.GetACLRoleByName(ws, replicatedACLRole.Name) - require.NoError(t, err) + must.NoError(t, err) must.Eq(t, replicatedACLRole.Hash, replicatedACLRoleResp.Hash) // Try adding a new ACL role, which has a name clash with an existing @@ -258,7 +257,7 @@ func TestStateStore_UpsertACLRoles(t *testing.T) { err = testState.UpsertACLRoles(structs.MsgTypeTestSetup, 50, []*structs.ACLRole{dupRoleName}, false) - require.ErrorContains(t, err, fmt.Sprintf("ACL role with name %s already exists", dupRoleName.Name)) + must.ErrorContains(t, err, fmt.Sprintf("ACL role with name %s already exists", dupRoleName.Name)) } func TestStateStore_ValidateACLRolePolicyLinks(t *testing.T) { @@ -270,23 +269,23 @@ func TestStateStore_ValidateACLRolePolicyLinks(t *testing.T) { // This should error as no policies exist within state. err := testState.UpsertACLRoles(structs.MsgTypeTestSetup, 10, mockedACLRoles, false) - require.ErrorContains(t, err, "ACL policy not found") + must.ErrorContains(t, err, "ACL policy not found") // Upsert one ACL policy and retry the role which should still fail. 
policy1 := mock.ACLPolicy() policy1.Name = "mocked-test-policy-1" - require.NoError(t, testState.UpsertACLPolicies(structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{policy1})) + must.NoError(t, testState.UpsertACLPolicies(structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{policy1})) err = testState.UpsertACLRoles(structs.MsgTypeTestSetup, 20, mockedACLRoles, false) - require.ErrorContains(t, err, "ACL policy not found") + must.ErrorContains(t, err, "ACL policy not found") // Upsert the second ACL policy. The ACL role should now upsert into state // without error. policy2 := mock.ACLPolicy() policy2.Name = "mocked-test-policy-2" - require.NoError(t, testState.UpsertACLPolicies(structs.MsgTypeTestSetup, 20, []*structs.ACLPolicy{policy2})) - require.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 30, mockedACLRoles, false)) + must.NoError(t, testState.UpsertACLPolicies(structs.MsgTypeTestSetup, 20, []*structs.ACLPolicy{policy2})) + must.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 30, mockedACLRoles, false)) } func TestStateStore_DeleteACLRolesByID(t *testing.T) { @@ -299,37 +298,37 @@ func TestStateStore_DeleteACLRolesByID(t *testing.T) { policy2 := mock.ACLPolicy() policy2.Name = "mocked-test-policy-2" - require.NoError(t, testState.UpsertACLPolicies( + must.NoError(t, testState.UpsertACLPolicies( structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{policy1, policy2})) // Generate a some mocked ACL roles for testing and upsert these straight // into state. mockedACLRoles := []*structs.ACLRole{mock.ACLRole(), mock.ACLRole()} - require.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 10, mockedACLRoles, false)) + must.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 10, mockedACLRoles, false)) // Try and delete a role using a name that doesn't exist. This should // return an error and not change the index for the table. 
err := testState.DeleteACLRolesByID(structs.MsgTypeTestSetup, 20, []string{"not-a-role"}) - require.ErrorContains(t, err, "ACL role not found") + must.ErrorContains(t, err, "ACL role not found") tableIndex, err := testState.Index(TableACLRoles) - require.NoError(t, err) + must.NoError(t, err) must.Eq(t, 10, tableIndex) // Delete one of the previously upserted ACL roles. This should succeed // and modify the table index. err = testState.DeleteACLRolesByID(structs.MsgTypeTestSetup, 20, []string{mockedACLRoles[0].ID}) - require.NoError(t, err) + must.NoError(t, err) tableIndex, err = testState.Index(TableACLRoles) - require.NoError(t, err) + must.NoError(t, err) must.Eq(t, 20, tableIndex) // List the ACL roles and ensure we now only have one present and that it // is the one we expect. ws := memdb.NewWatchSet() iter, err := testState.GetACLRoles(ws) - require.NoError(t, err) + must.NoError(t, err) var aclRoles []*structs.ACLRole @@ -337,28 +336,28 @@ func TestStateStore_DeleteACLRolesByID(t *testing.T) { aclRoles = append(aclRoles, raw.(*structs.ACLRole)) } - require.Len(t, aclRoles, 1, "incorrect number of ACL roles found") - require.True(t, aclRoles[0].Equal(mockedACLRoles[1])) + must.Len(t, 1, aclRoles, must.Sprint("incorrect number of ACL roles found")) + must.True(t, aclRoles[0].Equal(mockedACLRoles[1])) // Delete the final remaining ACL role. This should succeed and modify the // table index. err = testState.DeleteACLRolesByID(structs.MsgTypeTestSetup, 30, []string{mockedACLRoles[1].ID}) - require.NoError(t, err) + must.NoError(t, err) tableIndex, err = testState.Index(TableACLRoles) - require.NoError(t, err) + must.NoError(t, err) must.Eq(t, 30, tableIndex) // List the ACL roles and ensure we have zero entries. 
iter, err = testState.GetACLRoles(ws) - require.NoError(t, err) + must.NoError(t, err) aclRoles = []*structs.ACLRole{} for raw := iter.Next(); raw != nil; raw = iter.Next() { aclRoles = append(aclRoles, raw.(*structs.ACLRole)) } - require.Len(t, aclRoles, 0, "incorrect number of ACL roles found") + must.Len(t, 0, aclRoles, must.Sprint("incorrect number of ACL roles found")) } func TestStateStore_GetACLRoles(t *testing.T) { @@ -371,18 +370,18 @@ func TestStateStore_GetACLRoles(t *testing.T) { policy2 := mock.ACLPolicy() policy2.Name = "mocked-test-policy-2" - require.NoError(t, testState.UpsertACLPolicies( + must.NoError(t, testState.UpsertACLPolicies( structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{policy1, policy2})) // Generate a some mocked ACL roles for testing and upsert these straight // into state. mockedACLRoles := []*structs.ACLRole{mock.ACLRole(), mock.ACLRole()} - require.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 10, mockedACLRoles, false)) + must.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 10, mockedACLRoles, false)) // List the ACL roles and ensure they are exactly as we expect. ws := memdb.NewWatchSet() iter, err := testState.GetACLRoles(ws) - require.NoError(t, err) + must.NoError(t, err) var aclRoles []*structs.ACLRole @@ -396,7 +395,7 @@ func TestStateStore_GetACLRoles(t *testing.T) { expected[i].ModifyIndex = 10 } - require.ElementsMatch(t, aclRoles, expected) + must.SliceContainsAll(t, aclRoles, expected) } func TestStateStore_GetACLRoleByID(t *testing.T) { @@ -409,29 +408,29 @@ func TestStateStore_GetACLRoleByID(t *testing.T) { policy2 := mock.ACLPolicy() policy2.Name = "mocked-test-policy-2" - require.NoError(t, testState.UpsertACLPolicies( + must.NoError(t, testState.UpsertACLPolicies( structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{policy1, policy2})) // Generate a some mocked ACL roles for testing and upsert these straight // into state. 
mockedACLRoles := []*structs.ACLRole{mock.ACLRole(), mock.ACLRole()} - require.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 10, mockedACLRoles, false)) + must.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 10, mockedACLRoles, false)) ws := memdb.NewWatchSet() // Try reading an ACL role that does not exist. aclRole, err := testState.GetACLRoleByID(ws, "not-a-role") - require.NoError(t, err) - require.Nil(t, aclRole) + must.NoError(t, err) + must.Nil(t, aclRole) // Read the two ACL roles that we should find. aclRole, err = testState.GetACLRoleByID(ws, mockedACLRoles[0].ID) - require.NoError(t, err) - require.Equal(t, mockedACLRoles[0], aclRole) + must.NoError(t, err) + must.Eq(t, mockedACLRoles[0], aclRole) aclRole, err = testState.GetACLRoleByID(ws, mockedACLRoles[1].ID) - require.NoError(t, err) - require.Equal(t, mockedACLRoles[1], aclRole) + must.NoError(t, err) + must.Eq(t, mockedACLRoles[1], aclRole) } func TestStateStore_GetACLRoleByName(t *testing.T) { @@ -444,29 +443,29 @@ func TestStateStore_GetACLRoleByName(t *testing.T) { policy2 := mock.ACLPolicy() policy2.Name = "mocked-test-policy-2" - require.NoError(t, testState.UpsertACLPolicies( + must.NoError(t, testState.UpsertACLPolicies( structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{policy1, policy2})) // Generate a some mocked ACL roles for testing and upsert these straight // into state. mockedACLRoles := []*structs.ACLRole{mock.ACLRole(), mock.ACLRole()} - require.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 10, mockedACLRoles, false)) + must.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 10, mockedACLRoles, false)) ws := memdb.NewWatchSet() // Try reading an ACL role that does not exist. aclRole, err := testState.GetACLRoleByName(ws, "not-a-role") - require.NoError(t, err) - require.Nil(t, aclRole) + must.NoError(t, err) + must.Nil(t, aclRole) // Read the two ACL roles that we should find. 
aclRole, err = testState.GetACLRoleByName(ws, mockedACLRoles[0].Name) - require.NoError(t, err) - require.Equal(t, mockedACLRoles[0], aclRole) + must.NoError(t, err) + must.Eq(t, mockedACLRoles[0], aclRole) aclRole, err = testState.GetACLRoleByName(ws, mockedACLRoles[1].Name) - require.NoError(t, err) - require.Equal(t, mockedACLRoles[1], aclRole) + must.NoError(t, err) + must.Eq(t, mockedACLRoles[1], aclRole) } func TestStateStore_GetACLRoleByIDPrefix(t *testing.T) { @@ -479,7 +478,7 @@ func TestStateStore_GetACLRoleByIDPrefix(t *testing.T) { policy2 := mock.ACLPolicy() policy2.Name = "mocked-test-policy-2" - require.NoError(t, testState.UpsertACLPolicies( + must.NoError(t, testState.UpsertACLPolicies( structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{policy1, policy2})) // Generate a some mocked ACL roles for testing and upsert these straight @@ -488,29 +487,29 @@ func TestStateStore_GetACLRoleByIDPrefix(t *testing.T) { mockedACLRoles := []*structs.ACLRole{mock.ACLRole(), mock.ACLRole()} mockedACLRoles[0].ID = "test-prefix-" + uuid.Generate() mockedACLRoles[1].ID = "test-prefix-" + uuid.Generate() - require.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 10, mockedACLRoles, false)) + must.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 10, mockedACLRoles, false)) ws := memdb.NewWatchSet() // Try using a prefix that doesn't match any entries. iter, err := testState.GetACLRoleByIDPrefix(ws, "nope") - require.NoError(t, err) + must.NoError(t, err) var aclRoles []*structs.ACLRole for raw := iter.Next(); raw != nil; raw = iter.Next() { aclRoles = append(aclRoles, raw.(*structs.ACLRole)) } - require.Len(t, aclRoles, 0) + must.Len(t, 0, aclRoles) // Use a prefix which should match two entries in state. 
iter, err = testState.GetACLRoleByIDPrefix(ws, "test-prefix-") - require.NoError(t, err) + must.NoError(t, err) aclRoles = []*structs.ACLRole{} for raw := iter.Next(); raw != nil; raw = iter.Next() { aclRoles = append(aclRoles, raw.(*structs.ACLRole)) } - require.Len(t, aclRoles, 2) + must.Len(t, 2, aclRoles) } func TestStateStore_fixTokenRoleLinks(t *testing.T) { @@ -531,26 +530,26 @@ func TestStateStore_fixTokenRoleLinks(t *testing.T) { policy2 := mock.ACLPolicy() policy2.Name = "mocked-test-policy-2" - require.NoError(t, testState.UpsertACLPolicies( + must.NoError(t, testState.UpsertACLPolicies( structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{policy1, policy2})) // Generate a some mocked ACL roles for testing and upsert these straight // into state. mockedACLRoles := []*structs.ACLRole{mock.ACLRole(), mock.ACLRole()} - require.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 20, mockedACLRoles, false)) + must.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 20, mockedACLRoles, false)) // Create an ACL token linking to the ACL role. token1 := mock.ACLToken() token1.Roles = []*structs.ACLTokenRoleLink{{ID: mockedACLRoles[0].ID}} - require.NoError(t, testState.UpsertACLTokens( + must.NoError(t, testState.UpsertACLTokens( structs.MsgTypeTestSetup, 20, []*structs.ACLToken{token1})) // Perform the fix and check the returned token contains the // correct roles. 
readTxn := testState.db.ReadTxn() outputToken, err := testState.fixTokenRoleLinks(readTxn, token1) - require.NoError(t, err) - require.Equal(t, outputToken.Roles, []*structs.ACLTokenRoleLink{{ + must.NoError(t, err) + must.Eq(t, outputToken.Roles, []*structs.ACLTokenRoleLink{{ Name: mockedACLRoles[0].Name, ID: mockedACLRoles[0].ID, }}) }, @@ -566,31 +565,31 @@ func TestStateStore_fixTokenRoleLinks(t *testing.T) { policy2 := mock.ACLPolicy() policy2.Name = "mocked-test-policy-2" - require.NoError(t, testState.UpsertACLPolicies( + must.NoError(t, testState.UpsertACLPolicies( structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{policy1, policy2})) // Generate a some mocked ACL roles for testing and upsert these straight // into state. mockedACLRoles := []*structs.ACLRole{mock.ACLRole(), mock.ACLRole()} - require.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 20, mockedACLRoles, false)) + must.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 20, mockedACLRoles, false)) // Create an ACL token linking to the ACL roles. token1 := mock.ACLToken() token1.Roles = []*structs.ACLTokenRoleLink{{ID: mockedACLRoles[0].ID}, {ID: mockedACLRoles[1].ID}} - require.NoError(t, testState.UpsertACLTokens( + must.NoError(t, testState.UpsertACLTokens( structs.MsgTypeTestSetup, 30, []*structs.ACLToken{token1})) // Now delete one of the ACL roles from state. - require.NoError(t, testState.DeleteACLRolesByID( + must.NoError(t, testState.DeleteACLRolesByID( structs.MsgTypeTestSetup, 40, []string{mockedACLRoles[0].ID})) // Perform the fix and check the returned token contains the // correct roles. 
readTxn := testState.db.ReadTxn() outputToken, err := testState.fixTokenRoleLinks(readTxn, token1) - require.NoError(t, err) - require.Len(t, outputToken.Roles, 1) - require.Equal(t, outputToken.Roles, []*structs.ACLTokenRoleLink{{ + must.NoError(t, err) + must.Len(t, 1, outputToken.Roles) + must.Eq(t, outputToken.Roles, []*structs.ACLTokenRoleLink{{ Name: mockedACLRoles[1].Name, ID: mockedACLRoles[1].ID, }}) }, @@ -606,31 +605,31 @@ func TestStateStore_fixTokenRoleLinks(t *testing.T) { policy2 := mock.ACLPolicy() policy2.Name = "mocked-test-policy-2" - require.NoError(t, testState.UpsertACLPolicies( + must.NoError(t, testState.UpsertACLPolicies( structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{policy1, policy2})) // Generate a some mocked ACL roles for testing and upsert these straight // into state. mockedACLRoles := []*structs.ACLRole{mock.ACLRole(), mock.ACLRole()} - require.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 20, mockedACLRoles, false)) + must.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 20, mockedACLRoles, false)) // Create an ACL token linking to the ACL roles. token1 := mock.ACLToken() token1.Roles = []*structs.ACLTokenRoleLink{{ID: mockedACLRoles[0].ID}, {ID: mockedACLRoles[1].ID}} - require.NoError(t, testState.UpsertACLTokens( + must.NoError(t, testState.UpsertACLTokens( structs.MsgTypeTestSetup, 30, []*structs.ACLToken{token1})) // Now change the name of one of the ACL roles. mockedACLRoles[0].Name = "badger-badger-badger" - require.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 40, mockedACLRoles, false)) + must.NoError(t, testState.UpsertACLRoles(structs.MsgTypeTestSetup, 40, mockedACLRoles, false)) // Perform the fix and check the returned token contains the // correct roles. 
readTxn := testState.db.ReadTxn() outputToken, err := testState.fixTokenRoleLinks(readTxn, token1) - require.NoError(t, err) - require.Len(t, outputToken.Roles, 2) - require.ElementsMatch(t, outputToken.Roles, []*structs.ACLTokenRoleLink{ + must.NoError(t, err) + must.Len(t, 2, outputToken.Roles) + must.SliceContainsAll(t, outputToken.Roles, []*structs.ACLTokenRoleLink{ {Name: mockedACLRoles[0].Name, ID: mockedACLRoles[0].ID}, {Name: mockedACLRoles[1].Name, ID: mockedACLRoles[1].ID}, }) diff --git a/nomad/state/state_store_service_registration_test.go b/nomad/state/state_store_service_registration_test.go index 6f050a2c3..36c379343 100644 --- a/nomad/state/state_store_service_registration_test.go +++ b/nomad/state/state_store_service_registration_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" - "github.com/stretchr/testify/require" + "github.com/shoenig/test/must" ) func TestStateStore_UpsertServiceRegistrations(t *testing.T) { @@ -26,18 +26,18 @@ func TestStateStore_UpsertServiceRegistrations(t *testing.T) { // Perform the initial upsert of service registrations. err := testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, insertIndex, services) - require.NoError(t, err) + must.NoError(t, err) // Check that the index for the table was modified as expected. initialIndex, err := testState.Index(TableServiceRegistrations) - require.NoError(t, err) - require.Equal(t, insertIndex, initialIndex) + must.NoError(t, err) + must.Eq(t, insertIndex, initialIndex) // List all the service registrations in the table, so we can perform a // number of tests on the return array. ws := memdb.NewWatchSet() iter, err := testState.GetServiceRegistrations(ws) - require.NoError(t, err) + must.NoError(t, err) // Count how many table entries we have, to ensure it is the expected // number. 
@@ -48,20 +48,22 @@ func TestStateStore_UpsertServiceRegistrations(t *testing.T) { // Ensure the create and modify indexes are populated correctly. serviceReg := raw.(*structs.ServiceRegistration) - require.Equal(t, insertIndex, serviceReg.CreateIndex, "incorrect create index", serviceReg.ID) - require.Equal(t, insertIndex, serviceReg.ModifyIndex, "incorrect modify index", serviceReg.ID) + must.Eq(t, insertIndex, serviceReg.CreateIndex, + must.Sprintf("incorrect create index for %s", serviceReg.ID)) + must.Eq(t, insertIndex, serviceReg.ModifyIndex, + must.Sprintf("incorrect modify index for %s", serviceReg.ID)) } - require.Equal(t, 2, count, "incorrect number of service registrations found") + must.Eq(t, 2, count, must.Sprint("incorrect number of service registrations found")) // SubTest Marker: This section attempts to upsert the exact same service // registrations without any modification. In this case, the index table // should not be updated, indicating no write actually happened due to // equality checking. reInsertIndex := uint64(30) - require.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, reInsertIndex, services)) + must.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, reInsertIndex, services)) reInsertActualIndex, err := testState.Index(TableServiceRegistrations) - require.NoError(t, err) - require.Equal(t, insertIndex, reInsertActualIndex, "index should not have changed") + must.NoError(t, err) + must.Eq(t, insertIndex, reInsertActualIndex, must.Sprint("index should not have changed")) // SubTest Marker: This section modifies a single one of the previously // inserted service registrations and performs an upsert. 
This ensures the @@ -72,16 +74,16 @@ func TestStateStore_UpsertServiceRegistrations(t *testing.T) { services1Update := []*structs.ServiceRegistration{service1Update} update1Index := uint64(40) - require.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, update1Index, services1Update)) + must.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, update1Index, services1Update)) // Check that the index for the table was modified as expected. updateActualIndex, err := testState.Index(TableServiceRegistrations) - require.NoError(t, err) - require.Equal(t, update1Index, updateActualIndex, "index should have changed") + must.NoError(t, err) + must.Eq(t, update1Index, updateActualIndex, must.Sprint("index should have changed")) // Get the service registrations from the table. iter, err = testState.GetServiceRegistrations(ws) - require.NoError(t, err) + must.NoError(t, err) // Iterate all the stored registrations and assert they are as expected. for raw := iter.Next(); raw != nil; raw = iter.Next() { @@ -98,8 +100,10 @@ func TestStateStore_UpsertServiceRegistrations(t *testing.T) { t.Errorf("unknown service registration found: %s", serviceReg.ID) continue } - require.Equal(t, insertIndex, serviceReg.CreateIndex, "incorrect create index", serviceReg.ID) - require.Equal(t, expectedModifyIndex, serviceReg.ModifyIndex, "incorrect modify index", serviceReg.ID) + must.Eq(t, insertIndex, serviceReg.CreateIndex, + must.Sprintf("incorrect create index for %s", serviceReg.ID)) + must.Eq(t, expectedModifyIndex, serviceReg.ModifyIndex, + must.Sprintf("incorrect modify index for %s", serviceReg.ID)) } // SubTest Marker: Here we modify the second registration but send an @@ -109,16 +113,16 @@ func TestStateStore_UpsertServiceRegistrations(t *testing.T) { services2Update := []*structs.ServiceRegistration{service1Update, service2Update} update2Index := uint64(50) - require.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, 
update2Index, services2Update)) + must.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, update2Index, services2Update)) // Check that the index for the table was modified as expected. update2ActualIndex, err := testState.Index(TableServiceRegistrations) - require.NoError(t, err) - require.Equal(t, update2Index, update2ActualIndex, "index should have changed") + must.NoError(t, err) + must.Eq(t, update2Index, update2ActualIndex, must.Sprint("index should have changed")) // Get the service registrations from the table. iter, err = testState.GetServiceRegistrations(ws) - require.NoError(t, err) + must.NoError(t, err) // Iterate all the stored registrations and assert they are as expected. for raw := iter.Next(); raw != nil; raw = iter.Next() { @@ -140,9 +144,12 @@ func TestStateStore_UpsertServiceRegistrations(t *testing.T) { t.Errorf("unknown service registration found: %s", serviceReg.ID) continue } - require.Equal(t, insertIndex, serviceReg.CreateIndex, "incorrect create index", serviceReg.ID) - require.Equal(t, expectedModifyIndex, serviceReg.ModifyIndex, "incorrect modify index", serviceReg.ID) - require.True(t, expectedServiceReg.Equal(serviceReg)) + + must.Eq(t, insertIndex, serviceReg.CreateIndex, + must.Sprintf("incorrect create index for %s", serviceReg.ID)) + must.Eq(t, expectedModifyIndex, serviceReg.ModifyIndex, + must.Sprintf("incorrect modify index for %s", serviceReg.ID)) + must.True(t, expectedServiceReg.Equal(serviceReg)) } } @@ -159,31 +166,31 @@ func TestStateStore_DeleteServiceRegistrationByID(t *testing.T) { initialIndex := uint64(10) err := testState.DeleteServiceRegistrationByID( structs.MsgTypeTestSetup, initialIndex, services[0].Namespace, services[0].ID) - require.EqualError(t, err, "service registration not found") + must.EqError(t, err, "service registration not found") actualInitialIndex, err := testState.Index(TableServiceRegistrations) - require.NoError(t, err) - require.Equal(t, uint64(0), actualInitialIndex, 
"index should not have changed") + must.NoError(t, err) + must.Eq(t, uint64(0), actualInitialIndex, must.Sprint("index should not have changed")) // SubTest Marker: This section upserts two registrations, deletes one, // then ensure the remaining is left as expected. - require.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) + must.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) // Perform the delete. delete1Index := uint64(20) - require.NoError(t, testState.DeleteServiceRegistrationByID( + must.NoError(t, testState.DeleteServiceRegistrationByID( structs.MsgTypeTestSetup, delete1Index, services[0].Namespace, services[0].ID)) // Check that the index for the table was modified as expected. actualDelete1Index, err := testState.Index(TableServiceRegistrations) - require.NoError(t, err) - require.Equal(t, delete1Index, actualDelete1Index, "index should have changed") + must.NoError(t, err) + must.Eq(t, delete1Index, actualDelete1Index, must.Sprint("index should have changed")) ws := memdb.NewWatchSet() // Get the service registrations from the table. iter, err := testState.GetServiceRegistrations(ws) - require.NoError(t, err) + must.NoError(t, err) var delete1Count int @@ -192,22 +199,22 @@ func TestStateStore_DeleteServiceRegistrationByID(t *testing.T) { for raw := iter.Next(); raw != nil; raw = iter.Next() { delete1Count++ } - require.Equal(t, 1, delete1Count, "unexpected number of registrations in table") + must.Eq(t, 1, delete1Count, must.Sprint("unexpected number of registrations in table")) // SubTest Marker: Delete the remaining registration and ensure all indexes // are updated as expected and the table is empty. 
delete2Index := uint64(30) - require.NoError(t, testState.DeleteServiceRegistrationByID( + must.NoError(t, testState.DeleteServiceRegistrationByID( structs.MsgTypeTestSetup, delete2Index, services[1].Namespace, services[1].ID)) // Check that the index for the table was modified as expected. actualDelete2Index, err := testState.Index(TableServiceRegistrations) - require.NoError(t, err) - require.Equal(t, delete2Index, actualDelete2Index, "index should have changed") + must.NoError(t, err) + must.Eq(t, delete2Index, actualDelete2Index, must.Sprint("index should have changed")) // Get the service registrations from the table. iter, err = testState.GetServiceRegistrations(ws) - require.NoError(t, err) + must.NoError(t, err) var delete2Count int @@ -216,7 +223,7 @@ func TestStateStore_DeleteServiceRegistrationByID(t *testing.T) { for raw := iter.Next(); raw != nil; raw = iter.Next() { delete2Count++ } - require.Equal(t, 0, delete2Count, "unexpected number of registrations in table") + must.Eq(t, 0, delete2Count, must.Sprint("unexpected number of registrations in table")) } func TestStateStore_DeleteServiceRegistrationByNodeID(t *testing.T) { @@ -230,32 +237,31 @@ func TestStateStore_DeleteServiceRegistrationByNodeID(t *testing.T) { // by a nodeID that does not exist. This is easy to perform here as the // state is empty. 
initialIndex := uint64(10) - require.NoError(t, - testState.DeleteServiceRegistrationByNodeID(structs.MsgTypeTestSetup, initialIndex, services[0].NodeID)) + must.NoError(t, testState.DeleteServiceRegistrationByNodeID(structs.MsgTypeTestSetup, initialIndex, services[0].NodeID)) actualInitialIndex, err := testState.Index(TableServiceRegistrations) - require.NoError(t, err) - require.Equal(t, uint64(0), actualInitialIndex, "index should not have changed") + must.NoError(t, err) + must.Eq(t, uint64(0), actualInitialIndex, must.Sprint("index should not have changed")) // SubTest Marker: This section upserts two registrations then deletes one // by using the nodeID. - require.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) + must.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) // Perform the delete. delete1Index := uint64(20) - require.NoError(t, testState.DeleteServiceRegistrationByNodeID( + must.NoError(t, testState.DeleteServiceRegistrationByNodeID( structs.MsgTypeTestSetup, delete1Index, services[0].NodeID)) // Check that the index for the table was modified as expected. actualDelete1Index, err := testState.Index(TableServiceRegistrations) - require.NoError(t, err) - require.Equal(t, delete1Index, actualDelete1Index, "index should have changed") + must.NoError(t, err) + must.Eq(t, delete1Index, actualDelete1Index, must.Sprint("index should have changed")) ws := memdb.NewWatchSet() // Get the service registrations from the table. 
iter, err := testState.GetServiceRegistrations(ws) - require.NoError(t, err) + must.NoError(t, err) var delete1Count int @@ -264,7 +270,7 @@ func TestStateStore_DeleteServiceRegistrationByNodeID(t *testing.T) { for raw := iter.Next(); raw != nil; raw = iter.Next() { delete1Count++ } - require.Equal(t, 1, delete1Count, "unexpected number of registrations in table") + must.Eq(t, 1, delete1Count, must.Sprint("unexpected number of registrations in table")) // SubTest Marker: Add multiple service registrations for a single nodeID // then delete these via the nodeID. @@ -289,21 +295,20 @@ func TestStateStore_DeleteServiceRegistrationByNodeID(t *testing.T) { // Upsert the new service registrations. delete2UpsertIndex := uint64(30) - require.NoError(t, - testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, delete2UpsertIndex, delete2NodeServices)) + must.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, delete2UpsertIndex, delete2NodeServices)) delete2Index := uint64(40) - require.NoError(t, testState.DeleteServiceRegistrationByNodeID( + must.NoError(t, testState.DeleteServiceRegistrationByNodeID( structs.MsgTypeTestSetup, delete2Index, delete2NodeID)) // Check that the index for the table was modified as expected. actualDelete2Index, err := testState.Index(TableServiceRegistrations) - require.NoError(t, err) - require.Equal(t, delete2Index, actualDelete2Index, "index should have changed") + must.NoError(t, err) + must.Eq(t, delete2Index, actualDelete2Index, must.Sprint("index should have changed")) // Get the service registrations from the table. 
iter, err = testState.GetServiceRegistrations(ws) - require.NoError(t, err) + must.NoError(t, err) var delete2Count int @@ -312,7 +317,7 @@ func TestStateStore_DeleteServiceRegistrationByNodeID(t *testing.T) { for raw := iter.Next(); raw != nil; raw = iter.Next() { delete2Count++ } - require.Equal(t, 0, delete2Count, "unexpected number of registrations in table") + must.Eq(t, 0, delete2Count, must.Sprint("unexpected number of registrations in table")) } func TestStateStore_GetServiceRegistrations(t *testing.T) { @@ -322,12 +327,12 @@ func TestStateStore_GetServiceRegistrations(t *testing.T) { // Generate some test services and upsert them. services := mock.ServiceRegistrations() initialIndex := uint64(10) - require.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) + must.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) // Read the service registrations and check the objects. ws := memdb.NewWatchSet() iter, err := testState.GetServiceRegistrations(ws) - require.NoError(t, err) + must.NoError(t, err) var count int @@ -335,19 +340,22 @@ func TestStateStore_GetServiceRegistrations(t *testing.T) { count++ serviceReg := raw.(*structs.ServiceRegistration) - require.Equal(t, initialIndex, serviceReg.CreateIndex, "incorrect create index", serviceReg.ID) - require.Equal(t, initialIndex, serviceReg.ModifyIndex, "incorrect modify index", serviceReg.ID) + + must.Eq(t, initialIndex, serviceReg.CreateIndex, + must.Sprintf("incorrect create index for %s", serviceReg.ID)) + must.Eq(t, initialIndex, serviceReg.ModifyIndex, + must.Sprintf("incorrect modify index for %s", serviceReg.ID)) switch serviceReg.ID { case services[0].ID: - require.Equal(t, services[0], serviceReg) + must.Eq(t, services[0], serviceReg) case services[1].ID: - require.Equal(t, services[1], serviceReg) + must.Eq(t, services[1], serviceReg) default: t.Errorf("unknown service registration found: %s", serviceReg.ID) } 
} - require.Equal(t, 2, count) + must.Eq(t, 2, count) } func TestStateStore_GetServiceRegistrationsByNamespace(t *testing.T) { @@ -357,50 +365,54 @@ func TestStateStore_GetServiceRegistrationsByNamespace(t *testing.T) { // Generate some test services and upsert them. services := mock.ServiceRegistrations() initialIndex := uint64(10) - require.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) + must.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) // Look up services using the namespace of the first service. ws := memdb.NewWatchSet() iter, err := testState.GetServiceRegistrationsByNamespace(ws, services[0].Namespace) - require.NoError(t, err) + must.NoError(t, err) var count1 int for raw := iter.Next(); raw != nil; raw = iter.Next() { count1++ serviceReg := raw.(*structs.ServiceRegistration) - require.Equal(t, initialIndex, serviceReg.CreateIndex, "incorrect create index", serviceReg.ID) - require.Equal(t, initialIndex, serviceReg.ModifyIndex, "incorrect modify index", serviceReg.ID) - require.Equal(t, services[0].Namespace, serviceReg.Namespace) + must.Eq(t, initialIndex, serviceReg.CreateIndex, + must.Sprintf("incorrect create index for %s", serviceReg.ID)) + must.Eq(t, initialIndex, serviceReg.ModifyIndex, + must.Sprintf("incorrect modify index for %s", serviceReg.ID)) + must.Eq(t, services[0].Namespace, serviceReg.Namespace) } - require.Equal(t, 1, count1) + must.Eq(t, 1, count1) // Look up services using the namespace of the second service. 
iter, err = testState.GetServiceRegistrationsByNamespace(ws, services[1].Namespace) - require.NoError(t, err) + must.NoError(t, err) var count2 int for raw := iter.Next(); raw != nil; raw = iter.Next() { count2++ serviceReg := raw.(*structs.ServiceRegistration) - require.Equal(t, initialIndex, serviceReg.CreateIndex, "incorrect create index", serviceReg.ID) - require.Equal(t, initialIndex, serviceReg.ModifyIndex, "incorrect modify index", serviceReg.ID) - require.Equal(t, services[1].Namespace, serviceReg.Namespace) + must.Eq(t, initialIndex, serviceReg.CreateIndex, + must.Sprintf("incorrect create index for %s", serviceReg.ID)) + must.Eq(t, initialIndex, serviceReg.ModifyIndex, + must.Sprintf("incorrect modify index for %s", serviceReg.ID)) + must.Eq(t, services[1].Namespace, serviceReg.Namespace) } - require.Equal(t, 1, count2) + must.Eq(t, 1, count2) // Look up services using a namespace that shouldn't contain any // registrations. iter, err = testState.GetServiceRegistrationsByNamespace(ws, "pony-club") - require.NoError(t, err) + must.NoError(t, err) var count3 int for raw := iter.Next(); raw != nil; raw = iter.Next() { count3++ } - require.Equal(t, 0, count3) + must.Eq(t, 0, count3) } func TestStateStore_GetServiceRegistrationByName(t *testing.T) { @@ -410,34 +422,34 @@ func TestStateStore_GetServiceRegistrationByName(t *testing.T) { // Generate some test services and upsert them. services := mock.ServiceRegistrations() initialIndex := uint64(10) - require.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) + must.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) // Try reading a service by a name that shouldn't exist. 
ws := memdb.NewWatchSet() iter, err := testState.GetServiceRegistrationByName(ws, "default", "pony-glitter-api") - require.NoError(t, err) + must.NoError(t, err) var count1 int for raw := iter.Next(); raw != nil; raw = iter.Next() { count1++ } - require.Equal(t, 0, count1) + must.Eq(t, 0, count1) // Read one of the known service registrations. expectedReg := services[1].Copy() iter, err = testState.GetServiceRegistrationByName(ws, expectedReg.Namespace, expectedReg.ServiceName) - require.NoError(t, err) + must.NoError(t, err) var count2 int for raw := iter.Next(); raw != nil; raw = iter.Next() { count2++ serviceReg := raw.(*structs.ServiceRegistration) - require.Equal(t, expectedReg.ServiceName, serviceReg.ServiceName) - require.Equal(t, expectedReg.Namespace, serviceReg.Namespace) + must.Eq(t, expectedReg.ServiceName, serviceReg.ServiceName) + must.Eq(t, expectedReg.Namespace, serviceReg.Namespace) } - require.Equal(t, 1, count2) + must.Eq(t, 1, count2) // Create a bunch of additional services whose name and namespace match // that of expectedReg. 
@@ -460,20 +472,20 @@ func TestStateStore_GetServiceRegistrationByName(t *testing.T) { } updateIndex := uint64(20) - require.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, updateIndex, newServices)) + must.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, updateIndex, newServices)) iter, err = testState.GetServiceRegistrationByName(ws, expectedReg.Namespace, expectedReg.ServiceName) - require.NoError(t, err) + must.NoError(t, err) var count3 int for raw := iter.Next(); raw != nil; raw = iter.Next() { count3++ serviceReg := raw.(*structs.ServiceRegistration) - require.Equal(t, expectedReg.ServiceName, serviceReg.ServiceName) - require.Equal(t, expectedReg.Namespace, serviceReg.Namespace) + must.Eq(t, expectedReg.ServiceName, serviceReg.ServiceName) + must.Eq(t, expectedReg.Namespace, serviceReg.Namespace) } - require.Equal(t, 5, count3) + must.Eq(t, 5, count3) } func TestStateStore_GetServiceRegistrationByID(t *testing.T) { @@ -483,23 +495,23 @@ func TestStateStore_GetServiceRegistrationByID(t *testing.T) { // Generate some test services and upsert them. services := mock.ServiceRegistrations() initialIndex := uint64(10) - require.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) + must.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) ws := memdb.NewWatchSet() // Try reading a service by an ID that shouldn't exist. serviceReg, err := testState.GetServiceRegistrationByID(ws, "default", "pony-glitter-sparkles") - require.NoError(t, err) - require.Nil(t, serviceReg) + must.NoError(t, err) + must.Nil(t, serviceReg) // Read the two services that we should find. 
serviceReg, err = testState.GetServiceRegistrationByID(ws, services[0].Namespace, services[0].ID) - require.NoError(t, err) - require.Equal(t, services[0], serviceReg) + must.NoError(t, err) + must.Eq(t, services[0], serviceReg) serviceReg, err = testState.GetServiceRegistrationByID(ws, services[1].Namespace, services[1].ID) - require.NoError(t, err) - require.Equal(t, services[1], serviceReg) + must.NoError(t, err) + must.Eq(t, services[1], serviceReg) } func TestStateStore_GetServiceRegistrationsByAllocID(t *testing.T) { @@ -509,43 +521,43 @@ func TestStateStore_GetServiceRegistrationsByAllocID(t *testing.T) { // Generate some test services and upsert them. services := mock.ServiceRegistrations() initialIndex := uint64(10) - require.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) + must.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) ws := memdb.NewWatchSet() // Try reading services by an allocation that doesn't have any // registrations. iter, err := testState.GetServiceRegistrationsByAllocID(ws, "4eed3c6d-6bf1-60d6-040a-e347accae6c4") - require.NoError(t, err) + must.NoError(t, err) var count1 int for raw := iter.Next(); raw != nil; raw = iter.Next() { count1++ } - require.Equal(t, 0, count1) + must.Eq(t, 0, count1) // Read the two allocations that we should find. 
iter, err = testState.GetServiceRegistrationsByAllocID(ws, services[0].AllocID) - require.NoError(t, err) + must.NoError(t, err) var count2 int for raw := iter.Next(); raw != nil; raw = iter.Next() { count2++ serviceReg := raw.(*structs.ServiceRegistration) - require.Equal(t, services[0].AllocID, serviceReg.AllocID) + must.Eq(t, services[0].AllocID, serviceReg.AllocID) } - require.Equal(t, 1, count2) + must.Eq(t, 1, count2) iter, err = testState.GetServiceRegistrationsByAllocID(ws, services[1].AllocID) - require.NoError(t, err) + must.NoError(t, err) var count3 int for raw := iter.Next(); raw != nil; raw = iter.Next() { count3++ serviceReg := raw.(*structs.ServiceRegistration) - require.Equal(t, services[1].AllocID, serviceReg.AllocID) + must.Eq(t, services[1].AllocID, serviceReg.AllocID) } - require.Equal(t, 1, count3) + must.Eq(t, 1, count3) } func TestStateStore_GetServiceRegistrationsByJobID(t *testing.T) { @@ -555,47 +567,51 @@ func TestStateStore_GetServiceRegistrationsByJobID(t *testing.T) { // Generate some test services and upsert them. services := mock.ServiceRegistrations() initialIndex := uint64(10) - require.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) + must.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) ws := memdb.NewWatchSet() // Perform a query against a job that shouldn't have any registrations. iter, err := testState.GetServiceRegistrationsByJobID(ws, "default", "tamagotchi") - require.NoError(t, err) + must.NoError(t, err) var count1 int for raw := iter.Next(); raw != nil; raw = iter.Next() { count1++ } - require.Equal(t, 0, count1) + must.Eq(t, 0, count1) // Look up services using the namespace and jobID of the first service. 
iter, err = testState.GetServiceRegistrationsByJobID(ws, services[0].Namespace, services[0].JobID) - require.NoError(t, err) + must.NoError(t, err) var outputList1 []*structs.ServiceRegistration for raw := iter.Next(); raw != nil; raw = iter.Next() { serviceReg := raw.(*structs.ServiceRegistration) - require.Equal(t, initialIndex, serviceReg.CreateIndex, "incorrect create index", serviceReg.ID) - require.Equal(t, initialIndex, serviceReg.ModifyIndex, "incorrect modify index", serviceReg.ID) + must.Eq(t, initialIndex, serviceReg.CreateIndex, + must.Sprintf("incorrect create index for %s", serviceReg.ID)) + must.Eq(t, initialIndex, serviceReg.ModifyIndex, + must.Sprintf("incorrect modify index for %s", serviceReg.ID)) outputList1 = append(outputList1, serviceReg) } - require.ElementsMatch(t, outputList1, []*structs.ServiceRegistration{services[0]}) + must.SliceContainsAll(t, outputList1, []*structs.ServiceRegistration{services[0]}) // Look up services using the namespace and jobID of the second service. 
iter, err = testState.GetServiceRegistrationsByJobID(ws, services[1].Namespace, services[1].JobID) - require.NoError(t, err) + must.NoError(t, err) var outputList2 []*structs.ServiceRegistration for raw := iter.Next(); raw != nil; raw = iter.Next() { serviceReg := raw.(*structs.ServiceRegistration) - require.Equal(t, initialIndex, serviceReg.CreateIndex, "incorrect create index", serviceReg.ID) - require.Equal(t, initialIndex, serviceReg.ModifyIndex, "incorrect modify index", serviceReg.ID) + must.Eq(t, initialIndex, serviceReg.CreateIndex, + must.Sprintf("incorrect create index for %s", serviceReg.ID)) + must.Eq(t, initialIndex, serviceReg.ModifyIndex, + must.Sprintf("incorrect modify index for %s", serviceReg.ID)) outputList2 = append(outputList2, serviceReg) } - require.ElementsMatch(t, outputList2, []*structs.ServiceRegistration{services[1]}) + must.SliceContainsAll(t, outputList2, []*structs.ServiceRegistration{services[1]}) } func TestStateStore_GetServiceRegistrationsByNodeID(t *testing.T) { @@ -605,21 +621,21 @@ func TestStateStore_GetServiceRegistrationsByNodeID(t *testing.T) { // Generate some test services and upsert them. services := mock.ServiceRegistrations() initialIndex := uint64(10) - require.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) + must.NoError(t, testState.UpsertServiceRegistrations(structs.MsgTypeTestSetup, initialIndex, services)) ws := memdb.NewWatchSet() // Perform a query against a node that shouldn't have any registrations. serviceRegs, err := testState.GetServiceRegistrationsByNodeID(ws, "4eed3c6d-6bf1-60d6-040a-e347accae6c4") - require.NoError(t, err) - require.Len(t, serviceRegs, 0) + must.NoError(t, err) + must.Len(t, 0, serviceRegs) // Read the two nodes that we should find entries for. 
serviceRegs, err = testState.GetServiceRegistrationsByNodeID(ws, services[0].NodeID) - require.NoError(t, err) - require.Len(t, serviceRegs, 1) + must.NoError(t, err) + must.Len(t, 1, serviceRegs) serviceRegs, err = testState.GetServiceRegistrationsByNodeID(ws, services[1].NodeID) - require.NoError(t, err) - require.Len(t, serviceRegs, 1) + must.NoError(t, err) + must.Len(t, 1, serviceRegs) } diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 59e326cc4..622a8e161 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -6,7 +6,6 @@ package state import ( "context" "fmt" - "reflect" "sort" "strconv" "strings" @@ -20,9 +19,8 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/kr/pretty" + "github.com/shoenig/test" "github.com/shoenig/test/must" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func testStateStore(t *testing.T) *StateStore { @@ -36,7 +34,6 @@ func TestStateStore_InvalidConfig(t *testing.T) { } store, err := NewStateStore(config) must.Nil(t, store) - must.Error(t, err) must.ErrorContains(t, err, "JobTrackedVersions must be positive") } @@ -50,8 +47,8 @@ func TestStateStore_Blocking_Error(t *testing.T) { state := testStateStore(t) _, idx, err := state.BlockingQuery(errFn, 10, context.Background()) - assert.EqualError(t, err, expected.Error()) - assert.Zero(t, idx) + must.EqError(t, err, expected.Error()) + must.Zero(t, idx) } func TestStateStore_Blocking_Timeout(t *testing.T) { @@ -67,9 +64,9 @@ func TestStateStore_Blocking_Timeout(t *testing.T) { defer cancel() _, idx, err := state.BlockingQuery(noopFn, 10, deadlineCtx) - assert.EqualError(t, err, context.DeadlineExceeded.Error()) - assert.EqualValues(t, 5, idx) - assert.WithinDuration(t, timeout, time.Now(), 100*time.Millisecond) + must.EqError(t, err, context.DeadlineExceeded.Error()) + test.Eq(t, 5, idx) + test.LessEq(t, 100*time.Millisecond, 
time.Since(timeout)) } func TestStateStore_Blocking_MinQuery(t *testing.T) { @@ -103,11 +100,10 @@ func TestStateStore_Blocking_MinQuery(t *testing.T) { }) resp, idx, err := state.BlockingQuery(queryFn, 10, deadlineCtx) - if assert.Nil(t, err) { - assert.Equal(t, 2, count) - assert.EqualValues(t, 11, idx) - assert.True(t, resp.(bool)) - } + must.NoError(t, err) + test.Eq(t, 2, count) + test.Eq(t, 11, idx) + test.True(t, resp.(bool)) } // COMPAT 0.11: Uses AllocUpdateRequest.Alloc @@ -122,17 +118,13 @@ func TestStateStore_UpsertPlanResults_AllocationsCreated_Denormalized(t *testing job := alloc.Job alloc.Job = nil - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job)) eval := mock.Eval() eval.JobID = job.ID // Create an eval - if err := state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval})) // Create a plan result res := structs.ApplyPlanResultsRequest{ @@ -142,27 +134,24 @@ func TestStateStore_UpsertPlanResults_AllocationsCreated_Denormalized(t *testing }, EvalID: eval.ID, } - assert := assert.New(t) + err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) - assert.Nil(err) + must.NoError(t, err) ws := memdb.NewWatchSet() out, err := state.AllocByID(ws, alloc.ID) - assert.Nil(err) - assert.Equal(alloc, out) + must.NoError(t, err) + test.Eq(t, alloc, out) index, err := state.Index("allocs") - assert.Nil(err) - assert.EqualValues(1000, index) - - if watchFired(ws) { - t.Fatalf("bad") - } + must.NoError(t, err) + test.Eq(t, 1000, index) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) evalOut, err := state.EvalByID(ws, eval.ID) - assert.Nil(err) - assert.NotNil(evalOut) - assert.EqualValues(1000, evalOut.ModifyIndex) + must.NoError(t, 
err) + must.NotNil(t, evalOut) + test.Eq(t, 1000, evalOut.ModifyIndex) } // This test checks that: @@ -191,22 +180,21 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) { PreemptedByAllocation: alloc.ID, } - require := require.New(t) - require.NoError(state.UpsertAllocs( + must.NoError(t, state.UpsertAllocs( structs.MsgTypeTestSetup, 900, []*structs.Allocation{stoppedAlloc, preemptedAlloc})) - require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job)) // modify job and ensure that stopped and preempted alloc point to original Job mJob := job.Copy() mJob.TaskGroups[0].Name = "other" - require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, mJob)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, mJob)) eval := mock.Eval() eval.JobID = job.ID // Create an eval - require.NoError(state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval})) + must.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval})) // Create a plan result res := structs.ApplyPlanResultsRequest{ @@ -218,49 +206,48 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) { EvalID: eval.ID, AllocsPreempted: []*structs.AllocationDiff{preemptedAllocDiff}, } - assert := assert.New(t) + planModifyIndex := uint64(1000) err := state.UpsertPlanResults(structs.MsgTypeTestSetup, planModifyIndex, &res) - require.NoError(err) + must.NoError(t, err) ws := memdb.NewWatchSet() out, err := state.AllocByID(ws, alloc.ID) - require.NoError(err) - assert.Equal(alloc, out) + must.NoError(t, err) + test.Eq(t, alloc, out) outJob, err := state.JobByID(ws, job.Namespace, job.ID) - require.NoError(err) - require.Equal(mJob.TaskGroups, outJob.TaskGroups) - require.NotEmpty(job.TaskGroups, outJob.TaskGroups) + must.NoError(t, err) + must.Eq(t, mJob.TaskGroups, outJob.TaskGroups) + must.SliceNotEmpty(t, 
outJob.TaskGroups) updatedStoppedAlloc, err := state.AllocByID(ws, stoppedAlloc.ID) - require.NoError(err) - assert.Equal(stoppedAllocDiff.DesiredDescription, updatedStoppedAlloc.DesiredDescription) - assert.Equal(structs.AllocDesiredStatusStop, updatedStoppedAlloc.DesiredStatus) - assert.Equal(stoppedAllocDiff.ClientStatus, updatedStoppedAlloc.ClientStatus) - assert.Equal(planModifyIndex, updatedStoppedAlloc.AllocModifyIndex) - assert.Equal(planModifyIndex, updatedStoppedAlloc.AllocModifyIndex) - assert.Equal(job.TaskGroups, updatedStoppedAlloc.Job.TaskGroups) + must.NoError(t, err) + test.Eq(t, stoppedAllocDiff.DesiredDescription, updatedStoppedAlloc.DesiredDescription) + test.Eq(t, structs.AllocDesiredStatusStop, updatedStoppedAlloc.DesiredStatus) + test.Eq(t, stoppedAllocDiff.ClientStatus, updatedStoppedAlloc.ClientStatus) + test.Eq(t, planModifyIndex, updatedStoppedAlloc.AllocModifyIndex) + test.Eq(t, planModifyIndex, updatedStoppedAlloc.AllocModifyIndex) + test.Eq(t, job.TaskGroups, updatedStoppedAlloc.Job.TaskGroups) updatedPreemptedAlloc, err := state.AllocByID(ws, preemptedAlloc.ID) - require.NoError(err) - assert.Equal(structs.AllocDesiredStatusEvict, updatedPreemptedAlloc.DesiredStatus) - assert.Equal(preemptedAllocDiff.PreemptedByAllocation, updatedPreemptedAlloc.PreemptedByAllocation) - assert.Equal(planModifyIndex, updatedPreemptedAlloc.AllocModifyIndex) - assert.Equal(planModifyIndex, updatedPreemptedAlloc.AllocModifyIndex) - assert.Equal(job.TaskGroups, updatedPreemptedAlloc.Job.TaskGroups) + must.NoError(t, err) + test.Eq(t, structs.AllocDesiredStatusEvict, updatedPreemptedAlloc.DesiredStatus) + test.Eq(t, preemptedAllocDiff.PreemptedByAllocation, updatedPreemptedAlloc.PreemptedByAllocation) + test.Eq(t, planModifyIndex, updatedPreemptedAlloc.AllocModifyIndex) + test.Eq(t, planModifyIndex, updatedPreemptedAlloc.AllocModifyIndex) + test.Eq(t, job.TaskGroups, updatedPreemptedAlloc.Job.TaskGroups) index, err := state.Index("allocs") - 
require.NoError(err) - assert.EqualValues(planModifyIndex, index) + must.NoError(t, err) + test.Eq(t, planModifyIndex, index) - require.False(watchFired(ws)) + must.False(t, watchFired(ws)) evalOut, err := state.EvalByID(ws, eval.ID) - require.NoError(err) - require.NotNil(evalOut) - assert.EqualValues(planModifyIndex, evalOut.ModifyIndex) - + must.NoError(t, err) + must.NotNil(t, evalOut) + test.Eq(t, planModifyIndex, evalOut.ModifyIndex) } // This test checks that the deployment is created and allocations count towards @@ -279,17 +266,13 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { alloc.DeploymentID = d.ID alloc2.DeploymentID = d.ID - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job)) eval := mock.Eval() eval.JobID = job.ID // Create an eval - if err := state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval})) // Create a plan result res := structs.ApplyPlanResultsRequest{ @@ -302,33 +285,27 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { } err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) ws := memdb.NewWatchSet() - assert := assert.New(t) out, err := state.AllocByID(ws, alloc.ID) - assert.Nil(err) - assert.Equal(alloc, out) + must.NoError(t, err) + test.Eq(t, alloc, out) dout, err := state.DeploymentByID(ws, d.ID) - assert.Nil(err) - assert.NotNil(dout) + must.NoError(t, err) + must.NotNil(t, dout) tg, ok := dout.TaskGroups[alloc.TaskGroup] - assert.True(ok) - assert.NotNil(tg) - assert.Equal(2, tg.PlacedAllocs) + test.True(t, ok) + must.NotNil(t, tg) + test.Eq(t, 2, tg.PlacedAllocs) evalOut, err := state.EvalByID(ws, eval.ID) - 
assert.Nil(err) - assert.NotNil(evalOut) - assert.EqualValues(1000, evalOut.ModifyIndex) - - if watchFired(ws) { - t.Fatalf("bad") - } + must.NoError(t, err) + must.NotNil(t, evalOut) + test.Eq(t, 1000, evalOut.ModifyIndex) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) // Update the allocs to be part of a new deployment d2 := d.Copy() @@ -350,23 +327,21 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { } err = state.UpsertPlanResults(structs.MsgTypeTestSetup, 1001, &res) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) dout, err = state.DeploymentByID(ws, d2.ID) - assert.Nil(err) - assert.NotNil(dout) + must.NoError(t, err) + must.NotNil(t, dout) tg, ok = dout.TaskGroups[alloc.TaskGroup] - assert.True(ok) - assert.NotNil(tg) - assert.Equal(2, tg.PlacedAllocs) + test.True(t, ok) + must.NotNil(t, tg) + test.Eq(t, 2, tg.PlacedAllocs) evalOut, err = state.EvalByID(ws, eval.ID) - assert.Nil(err) - assert.NotNil(evalOut) - assert.EqualValues(1001, evalOut.ModifyIndex) + must.NoError(t, err) + must.NotNil(t, evalOut) + test.Eq(t, 1001, evalOut.ModifyIndex) } // This test checks that: @@ -374,7 +349,6 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { // 2) Evals are inserted for preempted jobs func TestStateStore_UpsertPlanResults_PreemptedAllocs(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) alloc := mock.Alloc() @@ -383,18 +357,18 @@ func TestStateStore_UpsertPlanResults_PreemptedAllocs(t *testing.T) { // Insert job err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job) - require.NoError(err) + must.NoError(t, err) // Create an eval eval := mock.Eval() eval.JobID = job.ID err = state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval}) - require.NoError(err) + must.NoError(t, err) // Insert alloc that will be preempted in the plan preemptedAlloc := mock.Alloc() err = state.UpsertAllocs(structs.MsgTypeTestSetup, 2, 
[]*structs.Allocation{preemptedAlloc}) - require.NoError(err) + must.NoError(t, err) minimalPreemptedAlloc := &structs.Allocation{ ID: preemptedAlloc.ID, @@ -418,37 +392,37 @@ func TestStateStore_UpsertPlanResults_PreemptedAllocs(t *testing.T) { } err = state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) - require.NoError(err) + must.NoError(t, err) ws := memdb.NewWatchSet() // Verify alloc and eval created by plan out, err := state.AllocByID(ws, alloc.ID) - require.NoError(err) - require.Equal(alloc, out) + must.NoError(t, err) + must.Eq(t, alloc, out) index, err := state.Index("allocs") - require.NoError(err) - require.EqualValues(1000, index) + must.NoError(t, err) + must.Eq(t, 1000, index) evalOut, err := state.EvalByID(ws, eval.ID) - require.NoError(err) - require.NotNil(evalOut) - require.EqualValues(1000, evalOut.ModifyIndex) + must.NoError(t, err) + must.NotNil(t, evalOut) + must.Eq(t, 1000, evalOut.ModifyIndex) // Verify preempted alloc preempted, err := state.AllocByID(ws, preemptedAlloc.ID) - require.NoError(err) - require.Equal(preempted.DesiredStatus, structs.AllocDesiredStatusEvict) - require.Equal(preempted.DesiredDescription, fmt.Sprintf("Preempted by alloc ID %v", alloc.ID)) - require.Equal(preempted.Job.ID, preemptedAlloc.Job.ID) - require.Equal(preempted.Job, preemptedAlloc.Job) + must.NoError(t, err) + must.Eq(t, preempted.DesiredStatus, structs.AllocDesiredStatusEvict) + must.Eq(t, preempted.DesiredDescription, fmt.Sprintf("Preempted by alloc ID %v", alloc.ID)) + must.Eq(t, preempted.Job.ID, preemptedAlloc.Job.ID) + must.Eq(t, preempted.Job, preemptedAlloc.Job) // Verify eval for preempted job preemptedJobEval, err := state.EvalByID(ws, eval2.ID) - require.NoError(err) - require.NotNil(preemptedJobEval) - require.EqualValues(1000, preemptedJobEval.ModifyIndex) + must.NoError(t, err) + must.NotNil(t, preemptedJobEval) + must.Eq(t, 1000, preemptedJobEval.ModifyIndex) } @@ -459,25 +433,19 @@ func 
TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { // Create a job that applies to all job := mock.Job() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, job); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, job)) // Create a deployment that we will update its status doutstanding := mock.Deployment() doutstanding.JobID = job.ID - if err := state.UpsertDeployment(1000, doutstanding); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertDeployment(1000, doutstanding)) eval := mock.Eval() eval.JobID = job.ID // Create an eval - if err := state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval})) alloc := mock.Alloc() alloc.Job = nil @@ -504,36 +472,31 @@ func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { } err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) - if err != nil { - t.Fatalf("err: %v", err) - } - assert := assert.New(t) + must.NoError(t, err) ws := memdb.NewWatchSet() // Check the deployments are correctly updated. 
dout, err := state.DeploymentByID(ws, dnew.ID) - assert.Nil(err) - assert.NotNil(dout) + must.NoError(t, err) + must.NotNil(t, dout) tg, ok := dout.TaskGroups[alloc.TaskGroup] - assert.True(ok) - assert.NotNil(tg) - assert.Equal(1, tg.PlacedAllocs) + test.True(t, ok) + must.NotNil(t, tg) + test.Eq(t, 1, tg.PlacedAllocs) doutstandingout, err := state.DeploymentByID(ws, doutstanding.ID) - assert.Nil(err) - assert.NotNil(doutstandingout) - assert.Equal(update.Status, doutstandingout.Status) - assert.Equal(update.StatusDescription, doutstandingout.StatusDescription) - assert.EqualValues(1000, doutstandingout.ModifyIndex) + must.NoError(t, err) + must.NotNil(t, doutstandingout) + test.Eq(t, update.Status, doutstandingout.Status) + test.Eq(t, update.StatusDescription, doutstandingout.StatusDescription) + test.Eq(t, 1000, doutstandingout.ModifyIndex) evalOut, err := state.EvalByID(ws, eval.ID) - assert.Nil(err) - assert.NotNil(evalOut) - assert.EqualValues(1000, evalOut.ModifyIndex) - if watchFired(ws) { - t.Fatalf("bad") - } + must.NoError(t, err) + must.NotNil(t, evalOut) + test.Eq(t, 1000, evalOut.ModifyIndex) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_UpsertPlanResults_AllocationResources(t *testing.T) { @@ -589,39 +552,22 @@ func TestStateStore_UpsertDeployment(t *testing.T) { // Create a watchset so we can test that upsert fires the watch ws := memdb.NewWatchSet() _, err := state.DeploymentsByJobID(ws, deployment.Namespace, deployment.ID, true) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) err = state.UpsertDeployment(1000, deployment) - if err != nil { - t.Fatalf("err: %v", err) - } - if !watchFired(ws) { - t.Fatalf("bad") - } + must.NoError(t, err) + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() out, err := state.DeploymentByID(ws, deployment.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if !reflect.DeepEqual(deployment, out) { 
- t.Fatalf("bad: %#v %#v", deployment, out) - } + must.NoError(t, err) + must.Eq(t, deployment, out) index, err := state.Index("deployment") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1000 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1000, index) - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } // Tests that deployments of older create index and same job id are not returned @@ -641,26 +587,24 @@ func TestStateStore_OldDeployment(t *testing.T) { deploy2.JobID = job.ID deploy2.JobCreateIndex = 11 - require := require.New(t) - // Insert both deployments err := state.UpsertDeployment(1001, deploy1) - require.Nil(err) + must.NoError(t, err) err = state.UpsertDeployment(1002, deploy2) - require.Nil(err) + must.NoError(t, err) ws := memdb.NewWatchSet() // Should return both deployments deploys, err := state.DeploymentsByJobID(ws, deploy1.Namespace, job.ID, true) - require.Nil(err) - require.Len(deploys, 2) + must.NoError(t, err) + must.Len(t, 2, deploys) // Should only return deploy1 deploys, err = state.DeploymentsByJobID(ws, deploy1.Namespace, job.ID, false) - require.Nil(err) - require.Len(deploys, 1) - require.Equal(deploy1.ID, deploys[0].ID) + must.NoError(t, err) + must.Len(t, 1, deploys) + must.Eq(t, deploy1.ID, deploys[0].ID) } func TestStateStore_DeleteDeployment(t *testing.T) { @@ -671,49 +615,29 @@ func TestStateStore_DeleteDeployment(t *testing.T) { d2 := mock.Deployment() err := state.UpsertDeployment(1000, d1) - if err != nil { - t.Fatalf("err: %v", err) - } - if err := state.UpsertDeployment(1001, d2); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) + must.NoError(t, state.UpsertDeployment(1001, d2)) // Create a watchset so we can test that delete fires the watch ws := memdb.NewWatchSet() - if _, err := state.DeploymentByID(ws, d1.ID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err = state.DeploymentByID(ws, d1.ID) 
+ must.NoError(t, err) err = state.DeleteDeployment(1002, []string{d1.ID, d2.ID}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() out, err := state.DeploymentByID(ws, d1.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if out != nil { - t.Fatalf("bad: %#v %#v", d1, out) - } + must.NoError(t, err) + must.Nil(t, out, must.Sprintf("expect no deployment %#v", d1)) index, err := state.Index("deployment") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1002 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1002, index) - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_Deployments(t *testing.T) { @@ -722,17 +646,17 @@ func TestStateStore_Deployments(t *testing.T) { state := testStateStore(t) var deployments []*structs.Deployment - for i := 0; i < 10; i++ { + for i := range 10 { deployment := mock.Deployment() deployments = append(deployments, deployment) err := state.UpsertDeployment(1000+uint64(i), deployment) - require.NoError(t, err) + must.NoError(t, err) } ws := memdb.NewWatchSet() it, err := state.Deployments(ws, SortDefault) - require.NoError(t, err) + must.NoError(t, err) var out []*structs.Deployment for { @@ -743,8 +667,8 @@ func TestStateStore_Deployments(t *testing.T) { out = append(out, raw.(*structs.Deployment)) } - require.Equal(t, deployments, out) - require.False(t, watchFired(ws)) + must.Eq(t, deployments, out) + must.False(t, watchFired(ws)) } func TestStateStore_Deployments_Namespace(t *testing.T) { @@ -766,27 +690,27 @@ func TestStateStore_Deployments_Namespace(t *testing.T) { deploy3.Namespace = ns2.Name deploy4.Namespace = ns2.Name - require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + must.NoError(t, 
state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) // Create watchsets so we can test that update fires the watch watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()} _, err := state.DeploymentsByNamespace(watches[0], ns1.Name) - require.NoError(t, err) + must.NoError(t, err) _, err = state.DeploymentsByNamespace(watches[1], ns2.Name) - require.NoError(t, err) + must.NoError(t, err) - require.NoError(t, state.UpsertDeployment(1001, deploy1)) - require.NoError(t, state.UpsertDeployment(1002, deploy2)) - require.NoError(t, state.UpsertDeployment(1003, deploy3)) - require.NoError(t, state.UpsertDeployment(1004, deploy4)) - require.True(t, watchFired(watches[0])) - require.True(t, watchFired(watches[1])) + must.NoError(t, state.UpsertDeployment(1001, deploy1)) + must.NoError(t, state.UpsertDeployment(1002, deploy2)) + must.NoError(t, state.UpsertDeployment(1003, deploy3)) + must.NoError(t, state.UpsertDeployment(1004, deploy4)) + must.True(t, watchFired(watches[0])) + must.True(t, watchFired(watches[1])) ws := memdb.NewWatchSet() iter1, err := state.DeploymentsByNamespace(ws, ns1.Name) - require.NoError(t, err) + must.NoError(t, err) iter2, err := state.DeploymentsByNamespace(ws, ns2.Name) - require.NoError(t, err) + must.NoError(t, err) var out1 []*structs.Deployment for { @@ -806,20 +730,20 @@ func TestStateStore_Deployments_Namespace(t *testing.T) { out2 = append(out2, raw.(*structs.Deployment)) } - require.Len(t, out1, 2) - require.Len(t, out2, 2) + must.Len(t, 2, out1) + must.Len(t, 2, out2) for _, deploy := range out1 { - require.Equal(t, ns1.Name, deploy.Namespace) + must.Eq(t, ns1.Name, deploy.Namespace) } for _, deploy := range out2 { - require.Equal(t, ns2.Name, deploy.Namespace) + must.Eq(t, ns2.Name, deploy.Namespace) } index, err := state.Index("deployment") - require.NoError(t, err) - require.EqualValues(t, 1004, index) - require.False(t, watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 1004, index) + must.False(t, 
watchFired(ws)) } func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { @@ -830,7 +754,7 @@ func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { deploy.ID = "11111111-662e-d0ab-d1c9-3e434af7bdb4" err := state.UpsertDeployment(1000, deploy) - require.NoError(t, err) + must.NoError(t, err) gatherDeploys := func(iter memdb.ResultIterator) []*structs.Deployment { var deploys []*structs.Deployment @@ -849,51 +773,51 @@ func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { // Create a watchset so we can test that getters don't cause it to fire ws := memdb.NewWatchSet() iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, deploy.ID, SortDefault) - require.NoError(t, err) + must.NoError(t, err) deploys := gatherDeploys(iter) - require.Len(t, deploys, 1) - require.False(t, watchFired(ws)) + must.Len(t, 1, deploys) + must.False(t, watchFired(ws)) }) t.Run("using prefix", func(t *testing.T) { ws := memdb.NewWatchSet() iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, "11", SortDefault) - require.NoError(t, err) + must.NoError(t, err) deploys := gatherDeploys(iter) - require.Len(t, deploys, 1) - require.False(t, watchFired(ws)) + must.Len(t, 1, deploys) + must.False(t, watchFired(ws)) }) deploy = mock.Deployment() deploy.ID = "11222222-662e-d0ab-d1c9-3e434af7bdb4" err = state.UpsertDeployment(1001, deploy) - require.NoError(t, err) + must.NoError(t, err) t.Run("more than one", func(t *testing.T) { ws := memdb.NewWatchSet() iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, "11", SortDefault) - require.NoError(t, err) + must.NoError(t, err) deploys := gatherDeploys(iter) - require.Len(t, deploys, 2) + must.Len(t, 2, deploys) }) t.Run("filter to one", func(t *testing.T) { ws := memdb.NewWatchSet() iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, "1111", SortDefault) - require.NoError(t, err) + must.NoError(t, err) deploys := gatherDeploys(iter) - require.Len(t, deploys, 1) - require.False(t, watchFired(ws)) + 
must.Len(t, 1, deploys) + must.False(t, watchFired(ws)) }) t.Run("reverse order", func(t *testing.T) { ws := memdb.NewWatchSet() iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, "11", SortReverse) - require.NoError(t, err) + must.NoError(t, err) got := []string{} for _, d := range gatherDeploys(iter) { @@ -903,8 +827,8 @@ func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { "11222222-662e-d0ab-d1c9-3e434af7bdb4", "11111111-662e-d0ab-d1c9-3e434af7bdb4", } - require.Equal(t, expected, got) - require.False(t, watchFired(ws)) + must.Eq(t, expected, got) + must.False(t, watchFired(ws)) }) } @@ -925,9 +849,9 @@ func TestStateStore_DeploymentsByIDPrefix_Namespaces(t *testing.T) { deploy1.Namespace = ns1.Name deploy2.Namespace = ns2.Name - require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) - require.NoError(t, state.UpsertDeployment(1000, deploy1)) - require.NoError(t, state.UpsertDeployment(1001, deploy2)) + must.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + must.NoError(t, state.UpsertDeployment(1000, deploy1)) + must.NoError(t, state.UpsertDeployment(1001, deploy2)) gatherDeploys := func(iter memdb.ResultIterator) []*structs.Deployment { var deploys []*structs.Deployment @@ -944,21 +868,21 @@ func TestStateStore_DeploymentsByIDPrefix_Namespaces(t *testing.T) { ws := memdb.NewWatchSet() iter1, err := state.DeploymentsByIDPrefix(ws, ns1.Name, sharedPrefix, SortDefault) - require.NoError(t, err) + must.NoError(t, err) iter2, err := state.DeploymentsByIDPrefix(ws, ns2.Name, sharedPrefix, SortDefault) - require.NoError(t, err) + must.NoError(t, err) deploysNs1 := gatherDeploys(iter1) deploysNs2 := gatherDeploys(iter2) - require.Len(t, deploysNs1, 1) - require.Len(t, deploysNs2, 1) + must.Len(t, 1, deploysNs1) + must.Len(t, 1, deploysNs2) iter1, err = state.DeploymentsByIDPrefix(ws, ns1.Name, deploy1.ID[:8], SortDefault) - require.NoError(t, err) + must.NoError(t, err) deploysNs1 = 
gatherDeploys(iter1) - require.Len(t, deploysNs1, 1) - require.False(t, watchFired(ws)) + must.Len(t, 1, deploysNs1) + must.False(t, watchFired(ws)) } func TestStateStore_UpsertNamespaces(t *testing.T) { @@ -971,24 +895,24 @@ func TestStateStore_UpsertNamespaces(t *testing.T) { // Create a watchset so we can test that upsert fires the watch ws := memdb.NewWatchSet() _, err := state.NamespaceByName(ws, ns1.Name) - require.NoError(t, err) + must.NoError(t, err) - require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns1, ns2})) - require.True(t, watchFired(ws)) + must.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns1, ns2})) + must.True(t, watchFired(ws)) ws = memdb.NewWatchSet() out, err := state.NamespaceByName(ws, ns1.Name) - require.NoError(t, err) - require.Equal(t, ns1, out) + must.NoError(t, err) + must.Eq(t, ns1, out) out, err = state.NamespaceByName(ws, ns2.Name) - require.NoError(t, err) - require.Equal(t, ns2, out) + must.NoError(t, err) + must.Eq(t, ns2, out) index, err := state.Index(TableNamespaces) - require.NoError(t, err) - require.EqualValues(t, 1000, index) - require.False(t, watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 1000, index) + must.False(t, watchFired(ws)) } func TestStateStore_DeleteNamespaces(t *testing.T) { @@ -998,29 +922,29 @@ func TestStateStore_DeleteNamespaces(t *testing.T) { ns1 := mock.Namespace() ns2 := mock.Namespace() - require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns1, ns2})) + must.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns1, ns2})) // Create a watchset so we can test that delete fires the watch ws := memdb.NewWatchSet() _, err := state.NamespaceByName(ws, ns1.Name) - require.NoError(t, err) + must.NoError(t, err) - require.NoError(t, state.DeleteNamespaces(1001, []string{ns1.Name, ns2.Name})) - require.True(t, watchFired(ws)) + must.NoError(t, state.DeleteNamespaces(1001, []string{ns1.Name, ns2.Name})) + must.True(t, watchFired(ws)) ws = 
memdb.NewWatchSet() out, err := state.NamespaceByName(ws, ns1.Name) - require.NoError(t, err) - require.Nil(t, out) + must.NoError(t, err) + must.Nil(t, out) out, err = state.NamespaceByName(ws, ns2.Name) - require.NoError(t, err) - require.Nil(t, out) + must.NoError(t, err) + must.Nil(t, out) index, err := state.Index(TableNamespaces) - require.NoError(t, err) - require.EqualValues(t, 1001, index) - require.False(t, watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 1001, index) + must.False(t, watchFired(ws)) } func TestStateStore_DeleteNamespaces_Default(t *testing.T) { @@ -1030,11 +954,11 @@ func TestStateStore_DeleteNamespaces_Default(t *testing.T) { ns := mock.Namespace() ns.Name = structs.DefaultNamespace - require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) + must.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) err := state.DeleteNamespaces(1002, []string{ns.Name}) - require.Error(t, err) - require.Contains(t, err.Error(), "can not be deleted") + must.Error(t, err) + must.ErrorContains(t, err, "can not be deleted") } func TestStateStore_DeleteNamespaces_NonTerminalJobs(t *testing.T) { @@ -1043,31 +967,31 @@ func TestStateStore_DeleteNamespaces_NonTerminalJobs(t *testing.T) { state := testStateStore(t) ns := mock.Namespace() - require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) + must.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) job := mock.Job() job.Namespace = ns.Name - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job)) // Create a watchset so we can test that delete fires the watch ws := memdb.NewWatchSet() _, err := state.NamespaceByName(ws, ns.Name) - require.NoError(t, err) + must.NoError(t, err) err = state.DeleteNamespaces(1002, []string{ns.Name}) - require.Error(t, err) - require.Contains(t, err.Error(), "one non-terminal") - require.False(t, watchFired(ws)) + 
must.Error(t, err) + must.ErrorContains(t, err, "one non-terminal") + must.False(t, watchFired(ws)) ws = memdb.NewWatchSet() out, err := state.NamespaceByName(ws, ns.Name) - require.NoError(t, err) - require.NotNil(t, out) + must.NoError(t, err) + must.NotNil(t, out) index, err := state.Index(TableNamespaces) - require.NoError(t, err) - require.EqualValues(t, 1000, index) - require.False(t, watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 1000, index) + must.False(t, watchFired(ws)) } func TestStateStore_DeleteNamespaces_CSIVolumes(t *testing.T) { @@ -1076,33 +1000,33 @@ func TestStateStore_DeleteNamespaces_CSIVolumes(t *testing.T) { state := testStateStore(t) ns := mock.Namespace() - require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) + must.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) plugin := mock.CSIPlugin() vol := mock.CSIVolume(plugin) vol.Namespace = ns.Name - require.NoError(t, state.UpsertCSIVolume(1001, []*structs.CSIVolume{vol})) + must.NoError(t, state.UpsertCSIVolume(1001, []*structs.CSIVolume{vol})) // Create a watchset so we can test that delete fires the watch ws := memdb.NewWatchSet() _, err := state.NamespaceByName(ws, ns.Name) - require.NoError(t, err) + must.NoError(t, err) err = state.DeleteNamespaces(1002, []string{ns.Name}) - require.Error(t, err) - require.Contains(t, err.Error(), "one CSI volume") - require.False(t, watchFired(ws)) + must.Error(t, err) + must.ErrorContains(t, err, "one CSI volume") + must.False(t, watchFired(ws)) ws = memdb.NewWatchSet() out, err := state.NamespaceByName(ws, ns.Name) - require.NoError(t, err) - require.NotNil(t, out) + must.NoError(t, err) + must.NotNil(t, out) index, err := state.Index(TableNamespaces) - require.NoError(t, err) - require.EqualValues(t, 1000, index) - require.False(t, watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 1000, index) + must.False(t, watchFired(ws)) } func TestStateStore_DeleteNamespaces_Variables(t *testing.T) { @@ -1111,7 
+1035,7 @@ func TestStateStore_DeleteNamespaces_Variables(t *testing.T) { state := testStateStore(t) ns := mock.Namespace() - require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) + must.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) sv := mock.VariableEncrypted() sv.Namespace = ns.Name @@ -1120,27 +1044,27 @@ func TestStateStore_DeleteNamespaces_Variables(t *testing.T) { Op: structs.VarOpSet, Var: sv, }) - require.NoError(t, resp.Error) + must.NoError(t, resp.Error) // Create a watchset so we can test that delete fires the watch ws := memdb.NewWatchSet() _, err := state.NamespaceByName(ws, ns.Name) - require.NoError(t, err) + must.NoError(t, err) err = state.DeleteNamespaces(1002, []string{ns.Name}) - require.Error(t, err) - require.Contains(t, err.Error(), "one variable") - require.False(t, watchFired(ws)) + must.Error(t, err) + must.ErrorContains(t, err, "one variable") + must.False(t, watchFired(ws)) ws = memdb.NewWatchSet() out, err := state.NamespaceByName(ws, ns.Name) - require.NoError(t, err) - require.NotNil(t, out) + must.NoError(t, err) + must.NotNil(t, out) index, err := state.Index(TableNamespaces) - require.NoError(t, err) - require.EqualValues(t, 1000, index) - require.False(t, watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 1000, index) + must.False(t, watchFired(ws)) } func TestStateStore_Namespaces(t *testing.T) { @@ -1149,17 +1073,17 @@ func TestStateStore_Namespaces(t *testing.T) { state := testStateStore(t) var namespaces []*structs.Namespace - for i := 0; i < 10; i++ { + for range 10 { ns := mock.Namespace() namespaces = append(namespaces, ns) } - require.NoError(t, state.UpsertNamespaces(1000, namespaces)) + must.NoError(t, state.UpsertNamespaces(1000, namespaces)) // Create a watchset so we can test that getters don't cause it to fire ws := memdb.NewWatchSet() iter, err := state.Namespaces(ws) - require.NoError(t, err) + must.NoError(t, err) var out []*structs.Namespace for { @@ -1176,8 +1100,8 @@ 
func TestStateStore_Namespaces(t *testing.T) { namespaceSort(namespaces) namespaceSort(out) - require.Equal(t, namespaces, out) - require.False(t, watchFired(ws)) + must.Eq(t, namespaces, out) + must.False(t, watchFired(ws)) } func TestStateStore_NamespaceNames(t *testing.T) { @@ -1187,22 +1111,22 @@ func TestStateStore_NamespaceNames(t *testing.T) { var namespaces []*structs.Namespace expectedNames := []string{structs.DefaultNamespace} - for i := 0; i < 10; i++ { + for range 10 { ns := mock.Namespace() namespaces = append(namespaces, ns) expectedNames = append(expectedNames, ns.Name) } err := state.UpsertNamespaces(1000, namespaces) - require.NoError(t, err) + must.NoError(t, err) found, err := state.NamespaceNames() - require.NoError(t, err) + must.NoError(t, err) sort.Strings(expectedNames) sort.Strings(found) - require.Equal(t, expectedNames, found) + must.Eq(t, expectedNames, found) } func TestStateStore_NamespaceByNamePrefix(t *testing.T) { @@ -1212,12 +1136,12 @@ func TestStateStore_NamespaceByNamePrefix(t *testing.T) { ns := mock.Namespace() ns.Name = "foobar" - require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) + must.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) // Create a watchset so we can test that getters don't cause it to fire ws := memdb.NewWatchSet() iter, err := state.NamespacesByNamePrefix(ws, ns.Name) - require.NoError(t, err) + must.NoError(t, err) gatherNamespaces := func(iter memdb.ResultIterator) []*structs.Namespace { var namespaces []*structs.Namespace @@ -1233,34 +1157,34 @@ func TestStateStore_NamespaceByNamePrefix(t *testing.T) { } namespaces := gatherNamespaces(iter) - require.Len(t, namespaces, 1) - require.False(t, watchFired(ws)) + must.Len(t, 1, namespaces) + must.False(t, watchFired(ws)) iter, err = state.NamespacesByNamePrefix(ws, "foo") - require.NoError(t, err) + must.NoError(t, err) namespaces = gatherNamespaces(iter) - require.Len(t, namespaces, 1) + must.Len(t, 1, namespaces) ns 
= mock.Namespace() ns.Name = "foozip" err = state.UpsertNamespaces(1001, []*structs.Namespace{ns}) - require.NoError(t, err) - require.True(t, watchFired(ws)) + must.NoError(t, err) + must.True(t, watchFired(ws)) ws = memdb.NewWatchSet() iter, err = state.NamespacesByNamePrefix(ws, "foo") - require.NoError(t, err) + must.NoError(t, err) namespaces = gatherNamespaces(iter) - require.Len(t, namespaces, 2) + must.Len(t, 2, namespaces) iter, err = state.NamespacesByNamePrefix(ws, "foob") - require.NoError(t, err) + must.NoError(t, err) namespaces = gatherNamespaces(iter) - require.Len(t, namespaces, 1) - require.False(t, watchFired(ws)) + must.Len(t, 1, namespaces) + must.False(t, watchFired(ws)) } func TestStateStore_RestoreNamespace(t *testing.T) { @@ -1270,15 +1194,15 @@ func TestStateStore_RestoreNamespace(t *testing.T) { ns := mock.Namespace() restore, err := state.Restore() - require.NoError(t, err) + must.NoError(t, err) - require.NoError(t, restore.NamespaceRestore(ns)) + must.NoError(t, restore.NamespaceRestore(ns)) restore.Commit() ws := memdb.NewWatchSet() out, err := state.NamespaceByName(ws, ns.Name) - require.NoError(t, err) - require.Equal(t, out, ns) + must.NoError(t, err) + must.Eq(t, out, ns) } // namespaceSort is used to sort namespaces by name @@ -1291,45 +1215,44 @@ func namespaceSort(namespaces []*structs.Namespace) { func TestStateStore_UpsertNode_Node(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) node := mock.Node() // Create a watchset so we can test that upsert fires the watch ws := memdb.NewWatchSet() _, err := state.NodeByID(ws, node.ID) - require.NoError(err) + must.NoError(t, err) - require.NoError(state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)) - require.True(watchFired(ws)) + must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)) + must.True(t, watchFired(ws)) ws = memdb.NewWatchSet() out, err := state.NodeByID(ws, node.ID) - require.NoError(err) + must.NoError(t, err) 
out2, err := state.NodeBySecretID(ws, node.SecretID) - require.NoError(err) - require.EqualValues(node, out) - require.EqualValues(node, out2) - require.Len(out.Events, 1) - require.Equal(NodeRegisterEventRegistered, out.Events[0].Message) + must.NoError(t, err) + must.Eq(t, node, out) + must.Eq(t, node, out2) + must.Len(t, 1, out.Events) + must.Eq(t, NodeRegisterEventRegistered, out.Events[0].Message) index, err := state.Index("nodes") - require.NoError(err) - require.EqualValues(1000, index) - require.False(watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 1000, index) + must.False(t, watchFired(ws)) // Transition the node to down and then up and ensure we get a re-register // event down := out.Copy() down.Status = structs.NodeStatusDown - require.NoError(state.UpsertNode(structs.MsgTypeTestSetup, 1001, down)) - require.NoError(state.UpsertNode(structs.MsgTypeTestSetup, 1002, out)) + must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1001, down)) + must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1002, out)) out, err = state.NodeByID(ws, node.ID) - require.NoError(err) - require.Len(out.Events, 2) - require.Equal(NodeRegisterEventReregistered, out.Events[1].Message) + must.NoError(t, err) + must.Len(t, 2, out.Events) + must.Eq(t, NodeRegisterEventReregistered, out.Events[1].Message) } func TestStateStore_UpsertNode_NodePool(t *testing.T) { @@ -1519,55 +1442,54 @@ func TestStateStore_DeleteNode_Node(t *testing.T) { node0 := mock.Node() node1 := mock.Node() err := state.UpsertNode(structs.MsgTypeTestSetup, 1000, node0) - require.NoError(t, err) + must.NoError(t, err) err = state.UpsertNode(structs.MsgTypeTestSetup, 1001, node1) - require.NoError(t, err) + must.NoError(t, err) // Create a watchset so we can test that delete fires the watch ws := memdb.NewWatchSet() // Check that both nodes are not nil out, err := state.NodeByID(ws, node0.ID) - require.NoError(t, err) - require.NotNil(t, out) + must.NoError(t, err) + must.NotNil(t, out) out, err = 
state.NodeByID(ws, node1.ID) - require.NoError(t, err) - require.NotNil(t, out) + must.NoError(t, err) + must.NotNil(t, out) // Delete both nodes in a batch, fires the watch err = state.DeleteNode(structs.MsgTypeTestSetup, 1002, []string{node0.ID, node1.ID}) - require.NoError(t, err) - require.True(t, watchFired(ws)) + must.NoError(t, err) + must.True(t, watchFired(ws)) // Check that both nodes are nil ws = memdb.NewWatchSet() out, err = state.NodeByID(ws, node0.ID) - require.NoError(t, err) - require.Nil(t, out) + must.NoError(t, err) + must.Nil(t, out) out, err = state.NodeByID(ws, node1.ID) - require.NoError(t, err) - require.Nil(t, out) + must.NoError(t, err) + must.Nil(t, out) // Ensure that the index is still at 1002, from DeleteNode index, err := state.Index("nodes") - require.NoError(t, err) - require.Equal(t, uint64(1002), index) - require.False(t, watchFired(ws)) + must.NoError(t, err) + must.Eq(t, uint64(1002), index) + must.False(t, watchFired(ws)) } func TestStateStore_UpdateNodeStatus_Node(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) node := mock.Node() - require.NoError(state.UpsertNode(structs.MsgTypeTestSetup, 800, node)) + must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 800, node)) // Create a watchset so we can test that update node status fires the watch ws := memdb.NewWatchSet() _, err := state.NodeByID(ws, node.ID) - require.NoError(err) + must.NoError(t, err) event := &structs.NodeEvent{ Message: "Node ready foo", @@ -1575,22 +1497,22 @@ func TestStateStore_UpdateNodeStatus_Node(t *testing.T) { Timestamp: time.Now(), } - require.NoError(state.UpdateNodeStatus(structs.MsgTypeTestSetup, 801, node.ID, structs.NodeStatusReady, 70, event)) - require.True(watchFired(ws)) + must.NoError(t, state.UpdateNodeStatus(structs.MsgTypeTestSetup, 801, node.ID, structs.NodeStatusReady, 70, event)) + must.True(t, watchFired(ws)) ws = memdb.NewWatchSet() out, err := state.NodeByID(ws, node.ID) - 
require.NoError(err) - require.Equal(structs.NodeStatusReady, out.Status) - require.EqualValues(801, out.ModifyIndex) - require.EqualValues(70, out.StatusUpdatedAt) - require.Len(out.Events, 2) - require.Equal(event.Message, out.Events[1].Message) + must.NoError(t, err) + must.Eq(t, structs.NodeStatusReady, out.Status) + must.Eq(t, 801, out.ModifyIndex) + must.Eq(t, 70, out.StatusUpdatedAt) + must.Len(t, 2, out.Events) + must.Eq(t, event.Message, out.Events[1].Message) index, err := state.Index("nodes") - require.NoError(err) - require.EqualValues(801, index) - require.False(watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 801, index) + must.False(t, watchFired(ws)) } func TestStatStore_UpdateNodeStatus_LastMissedHeartbeatIndex(t *testing.T) { @@ -1691,18 +1613,17 @@ func TestStatStore_UpdateNodeStatus_LastMissedHeartbeatIndex(t *testing.T) { func TestStateStore_BatchUpdateNodeDrain(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) n1, n2 := mock.Node(), mock.Node() - require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1000, n1)) - require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1001, n2)) + must.Nil(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, n1)) + must.Nil(t, state.UpsertNode(structs.MsgTypeTestSetup, 1001, n2)) // Create a watchset so we can test that update node drain fires the watch ws := memdb.NewWatchSet() _, err := state.NodeByID(ws, n1.ID) - require.Nil(err) + must.NoError(t, err) expectedDrain := &structs.DrainStrategy{ DrainSpec: structs.DrainSpec{ @@ -1729,41 +1650,40 @@ func TestStateStore_BatchUpdateNodeDrain(t *testing.T) { n2.ID: event, } - require.Nil(state.BatchUpdateNodeDrain(structs.MsgTypeTestSetup, 1002, 7, update, events)) - require.True(watchFired(ws)) + must.Nil(t, state.BatchUpdateNodeDrain(structs.MsgTypeTestSetup, 1002, 7, update, events)) + must.True(t, watchFired(ws)) ws = memdb.NewWatchSet() for _, id := range []string{n1.ID, n2.ID} { out, err := state.NodeByID(ws, id) - 
require.Nil(err) - require.NotNil(out.DrainStrategy) - require.Equal(out.DrainStrategy, expectedDrain) - require.NotNil(out.LastDrain) - require.Equal(structs.DrainStatusDraining, out.LastDrain.Status) - require.Len(out.Events, 2) - require.EqualValues(1002, out.ModifyIndex) - require.EqualValues(7, out.StatusUpdatedAt) + must.NoError(t, err) + must.NotNil(t, out.DrainStrategy) + must.Eq(t, out.DrainStrategy, expectedDrain) + must.NotNil(t, out.LastDrain) + must.Eq(t, structs.DrainStatusDraining, out.LastDrain.Status) + must.Len(t, 2, out.Events) + must.Eq(t, 1002, out.ModifyIndex) + must.Eq(t, 7, out.StatusUpdatedAt) } index, err := state.Index("nodes") - require.Nil(err) - require.EqualValues(1002, index) - require.False(watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 1002, index) + must.False(t, watchFired(ws)) } func TestStateStore_UpdateNodeDrain_Node(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) node := mock.Node() - require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)) + must.Nil(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)) // Create a watchset so we can test that update node drain fires the watch ws := memdb.NewWatchSet() _, err := state.NodeByID(ws, node.ID) - require.Nil(err) + must.NoError(t, err) expectedDrain := &structs.DrainStrategy{ DrainSpec: structs.DrainSpec{ @@ -1776,29 +1696,28 @@ func TestStateStore_UpdateNodeDrain_Node(t *testing.T) { Subsystem: structs.NodeEventSubsystemDrain, Timestamp: time.Now(), } - require.Nil(state.UpdateNodeDrain(structs.MsgTypeTestSetup, 1001, node.ID, expectedDrain, false, 7, event, nil, "")) - require.True(watchFired(ws)) + must.Nil(t, state.UpdateNodeDrain(structs.MsgTypeTestSetup, 1001, node.ID, expectedDrain, false, 7, event, nil, "")) + must.True(t, watchFired(ws)) ws = memdb.NewWatchSet() out, err := state.NodeByID(ws, node.ID) - require.Nil(err) - require.NotNil(out.DrainStrategy) - require.NotNil(out.LastDrain) - 
require.Equal(structs.DrainStatusDraining, out.LastDrain.Status) - require.Equal(out.DrainStrategy, expectedDrain) - require.Len(out.Events, 2) - require.EqualValues(1001, out.ModifyIndex) - require.EqualValues(7, out.StatusUpdatedAt) + must.NoError(t, err) + must.NotNil(t, out.DrainStrategy) + must.NotNil(t, out.LastDrain) + must.Eq(t, structs.DrainStatusDraining, out.LastDrain.Status) + must.Eq(t, out.DrainStrategy, expectedDrain) + must.Len(t, 2, out.Events) + must.Eq(t, 1001, out.ModifyIndex) + must.Eq(t, 7, out.StatusUpdatedAt) index, err := state.Index("nodes") - require.Nil(err) - require.EqualValues(1001, index) - require.False(watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 1001, index) + must.False(t, watchFired(ws)) } func TestStateStore_AddSingleNodeEvent(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) @@ -1806,16 +1725,16 @@ func TestStateStore_AddSingleNodeEvent(t *testing.T) { // We create a new node event every time we register a node err := state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) - require.Nil(err) + must.NoError(t, err) - require.Equal(1, len(node.Events)) - require.Equal(structs.NodeEventSubsystemCluster, node.Events[0].Subsystem) - require.Equal(NodeRegisterEventRegistered, node.Events[0].Message) + must.Eq(t, 1, len(node.Events)) + must.Eq(t, structs.NodeEventSubsystemCluster, node.Events[0].Subsystem) + must.Eq(t, NodeRegisterEventRegistered, node.Events[0].Message) // Create a watchset so we can test that AddNodeEvent fires the watch ws := memdb.NewWatchSet() _, err = state.NodeByID(ws, node.ID) - require.Nil(err) + must.NoError(t, err) nodeEvent := &structs.NodeEvent{ Message: "failed", @@ -1826,41 +1745,38 @@ func TestStateStore_AddSingleNodeEvent(t *testing.T) { node.ID: {nodeEvent}, } err = state.UpsertNodeEvents(structs.MsgTypeTestSetup, uint64(1001), nodeEvents) - require.Nil(err) + must.NoError(t, err) - require.True(watchFired(ws)) + must.True(t, watchFired(ws)) ws = 
memdb.NewWatchSet() out, err := state.NodeByID(ws, node.ID) - require.Nil(err) + must.NoError(t, err) - require.Equal(2, len(out.Events)) - require.Equal(nodeEvent, out.Events[1]) + must.Eq(t, 2, len(out.Events)) + must.Eq(t, nodeEvent, out.Events[1]) } // To prevent stale node events from accumulating, we limit the number of // stored node events to 10. func TestStateStore_NodeEvents_RetentionWindow(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) node := mock.Node() err := state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) - if err != nil { - t.Fatalf("err: %v", err) - } - require.Equal(1, len(node.Events)) - require.Equal(structs.NodeEventSubsystemCluster, node.Events[0].Subsystem) - require.Equal(NodeRegisterEventRegistered, node.Events[0].Message) + must.NoError(t, err) + must.Eq(t, 1, len(node.Events)) + must.Eq(t, structs.NodeEventSubsystemCluster, node.Events[0].Subsystem) + must.Eq(t, NodeRegisterEventRegistered, node.Events[0].Message) var out *structs.Node for i := 1; i <= 20; i++ { ws := memdb.NewWatchSet() out, err = state.NodeByID(ws, node.ID) - require.Nil(err) + must.NoError(t, err) nodeEvent := &structs.NodeEvent{ Message: fmt.Sprintf("%dith failed", i), @@ -1872,35 +1788,34 @@ func TestStateStore_NodeEvents_RetentionWindow(t *testing.T) { out.ID: {nodeEvent}, } err := state.UpsertNodeEvents(structs.MsgTypeTestSetup, uint64(i), nodeEvents) - require.Nil(err) + must.NoError(t, err) - require.True(watchFired(ws)) + must.True(t, watchFired(ws)) ws = memdb.NewWatchSet() out, err = state.NodeByID(ws, node.ID) - require.Nil(err) + must.NoError(t, err) } ws := memdb.NewWatchSet() out, err = state.NodeByID(ws, node.ID) - require.Nil(err) + must.NoError(t, err) - require.Equal(10, len(out.Events)) - require.Equal(uint64(11), out.Events[0].CreateIndex) - require.Equal(uint64(20), out.Events[len(out.Events)-1].CreateIndex) + must.Eq(t, 10, len(out.Events)) + must.Eq(t, uint64(11), out.Events[0].CreateIndex) + 
must.Eq(t, uint64(20), out.Events[len(out.Events)-1].CreateIndex) } func TestStateStore_UpdateNodeDrain_ResetEligiblity(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) node := mock.Node() - require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)) + must.Nil(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)) // Create a watchset so we can test that update node drain fires the watch ws := memdb.NewWatchSet() _, err := state.NodeByID(ws, node.ID) - require.Nil(err) + must.NoError(t, err) drain := &structs.DrainStrategy{ DrainSpec: structs.DrainSpec{ @@ -1913,8 +1828,8 @@ func TestStateStore_UpdateNodeDrain_ResetEligiblity(t *testing.T) { Subsystem: structs.NodeEventSubsystemDrain, Timestamp: time.Now(), } - require.Nil(state.UpdateNodeDrain(structs.MsgTypeTestSetup, 1001, node.ID, drain, false, 7, event1, nil, "")) - require.True(watchFired(ws)) + must.Nil(t, state.UpdateNodeDrain(structs.MsgTypeTestSetup, 1001, node.ID, drain, false, 7, event1, nil, "")) + must.True(t, watchFired(ws)) // Remove the drain event2 := &structs.NodeEvent{ @@ -1922,68 +1837,64 @@ func TestStateStore_UpdateNodeDrain_ResetEligiblity(t *testing.T) { Subsystem: structs.NodeEventSubsystemDrain, Timestamp: time.Now(), } - require.Nil(state.UpdateNodeDrain(structs.MsgTypeTestSetup, 1002, node.ID, nil, true, 9, event2, nil, "")) + must.Nil(t, state.UpdateNodeDrain(structs.MsgTypeTestSetup, 1002, node.ID, nil, true, 9, event2, nil, "")) ws = memdb.NewWatchSet() out, err := state.NodeByID(ws, node.ID) - require.Nil(err) - require.Nil(out.DrainStrategy) - require.Equal(out.SchedulingEligibility, structs.NodeSchedulingEligible) - require.NotNil(out.LastDrain) - require.Equal(structs.DrainStatusCanceled, out.LastDrain.Status) - require.Equal(time.Unix(7, 0), out.LastDrain.StartedAt) - require.Equal(time.Unix(9, 0), out.LastDrain.UpdatedAt) - require.Len(out.Events, 3) - require.EqualValues(1002, out.ModifyIndex) - require.EqualValues(9, 
out.StatusUpdatedAt) + must.NoError(t, err) + must.Nil(t, out.DrainStrategy) + must.Eq(t, out.SchedulingEligibility, structs.NodeSchedulingEligible) + must.NotNil(t, out.LastDrain) + must.Eq(t, structs.DrainStatusCanceled, out.LastDrain.Status) + must.Eq(t, time.Unix(7, 0), out.LastDrain.StartedAt) + must.Eq(t, time.Unix(9, 0), out.LastDrain.UpdatedAt) + must.Len(t, 3, out.Events) + must.Eq(t, 1002, out.ModifyIndex) + must.Eq(t, 9, out.StatusUpdatedAt) index, err := state.Index("nodes") - require.Nil(err) - require.EqualValues(1002, index) - require.False(watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 1002, index) + must.False(t, watchFired(ws)) } func TestStateStore_UpdateNodeEligibility(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) node := mock.Node() err := state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) expectedEligibility := structs.NodeSchedulingIneligible // Create a watchset so we can test that update node drain fires the watch ws := memdb.NewWatchSet() - if _, err := state.NodeByID(ws, node.ID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err = state.NodeByID(ws, node.ID) + must.NoError(t, err) event := &structs.NodeEvent{ Message: "Node marked as ineligible", Subsystem: structs.NodeEventSubsystemCluster, Timestamp: time.Now(), } - require.Nil(state.UpdateNodeEligibility(structs.MsgTypeTestSetup, 1001, node.ID, expectedEligibility, 7, event)) - require.True(watchFired(ws)) + must.Nil(t, state.UpdateNodeEligibility(structs.MsgTypeTestSetup, 1001, node.ID, expectedEligibility, 7, event)) + must.True(t, watchFired(ws)) ws = memdb.NewWatchSet() out, err := state.NodeByID(ws, node.ID) - require.Nil(err) - require.Equal(out.SchedulingEligibility, expectedEligibility) - require.Len(out.Events, 2) - require.Equal(out.Events[1], event) - require.EqualValues(1001, out.ModifyIndex) - require.EqualValues(7, out.StatusUpdatedAt) + 
must.NoError(t, err) + must.Eq(t, out.SchedulingEligibility, expectedEligibility) + must.Len(t, 2, out.Events) + must.Eq(t, out.Events[1], event) + must.Eq(t, 1001, out.ModifyIndex) + must.Eq(t, 7, out.StatusUpdatedAt) index, err := state.Index("nodes") - require.Nil(err) - require.EqualValues(1001, index) - require.False(watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 1001, index) + must.False(t, watchFired(ws)) // Set a drain strategy expectedDrain := &structs.DrainStrategy{ @@ -1991,12 +1902,12 @@ func TestStateStore_UpdateNodeEligibility(t *testing.T) { Deadline: -1 * time.Second, }, } - require.Nil(state.UpdateNodeDrain(structs.MsgTypeTestSetup, 1002, node.ID, expectedDrain, false, 7, nil, nil, "")) + must.Nil(t, state.UpdateNodeDrain(structs.MsgTypeTestSetup, 1002, node.ID, expectedDrain, false, 7, nil, nil, "")) // Try to set the node to eligible err = state.UpdateNodeEligibility(structs.MsgTypeTestSetup, 1003, node.ID, structs.NodeSchedulingEligible, 9, nil) - require.NotNil(err) - require.Contains(err.Error(), "while it is draining") + must.Error(t, err) + must.ErrorContains(t, err, "while it is draining") } func TestStateStore_Nodes(t *testing.T) { @@ -2005,22 +1916,18 @@ func TestStateStore_Nodes(t *testing.T) { state := testStateStore(t) var nodes []*structs.Node - for i := 0; i < 10; i++ { + for i := range 10 { node := mock.Node() nodes = append(nodes, node) err := state.UpsertNode(structs.MsgTypeTestSetup, 1000+uint64(i), node) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) } // Create a watchset so we can test that getters don't cause it to fire ws := memdb.NewWatchSet() iter, err := state.Nodes(ws) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) var out []*structs.Node for { @@ -2034,13 +1941,8 @@ func TestStateStore_Nodes(t *testing.T) { sort.Sort(NodeIDSort(nodes)) sort.Sort(NodeIDSort(out)) - if !reflect.DeepEqual(nodes, out) { - t.Fatalf("bad: %#v %#v", nodes, out) - } - - if watchFired(ws) { 
- t.Fatalf("bad") - } + must.Eq(t, nodes, out) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_NodesByIDPrefix(t *testing.T) { @@ -2051,16 +1953,12 @@ func TestStateStore_NodesByIDPrefix(t *testing.T) { node.ID = "11111111-662e-d0ab-d1c9-3e434af7bdb4" err := state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) // Create a watchset so we can test that getters don't cause it to fire ws := memdb.NewWatchSet() iter, err := state.NodesByIDPrefix(ws, node.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) gatherNodes := func(iter memdb.ResultIterator) []*structs.Node { var nodes []*structs.Node @@ -2076,59 +1974,37 @@ func TestStateStore_NodesByIDPrefix(t *testing.T) { } nodes := gatherNodes(iter) - if len(nodes) != 1 { - t.Fatalf("err: %v", err) - } + must.Len(t, 1, nodes) - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) iter, err = state.NodesByIDPrefix(ws, "11") - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) nodes = gatherNodes(iter) - if len(nodes) != 1 { - t.Fatalf("err: %v", err) - } + must.Len(t, 1, nodes) node = mock.Node() node.ID = "11222222-662e-d0ab-d1c9-3e434af7bdb4" err = state.UpsertNode(structs.MsgTypeTestSetup, 1001, node) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() iter, err = state.NodesByIDPrefix(ws, "11") - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) nodes = gatherNodes(iter) - if len(nodes) != 2 { - t.Fatalf("err: %v", err) - } + must.Len(t, 2, nodes) iter, err = state.NodesByIDPrefix(ws, "1111") - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) nodes = gatherNodes(iter) - if len(nodes) != 1 { - 
t.Fatalf("err: %v", err) - } + must.Len(t, 1, nodes) - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_NodesByNodePool(t *testing.T) { @@ -2203,61 +2079,32 @@ func TestStateStore_UpsertJob_Job(t *testing.T) { // Create a watchset so we can test that upsert fires the watch ws := memdb.NewWatchSet() _, err := state.JobByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { - t.Fatalf("err: %v", err) - } - if !watchFired(ws) { - t.Fatalf("bad") - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() out, err := state.JobByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if !reflect.DeepEqual(job, out) { - t.Fatalf("bad: %#v %#v", job, out) - } + must.NoError(t, err) + must.Eq(t, job, out) index, err := state.Index("jobs") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1000 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1000, index) summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if summary == nil { - t.Fatalf("nil summary") - } - if summary.JobID != job.ID { - t.Fatalf("bad summary id: %v", summary.JobID) - } + must.NoError(t, err) + must.NotNil(t, summary) + must.Eq(t, job.ID, summary.JobID, must.Sprint("bad summary id")) _, ok := summary.Summary["web"] - if !ok { - t.Fatalf("nil summary for task group") - } - if watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, ok, must.Sprint("nil summary for task group")) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) // Check the job versions allVersions, err := state.JobVersionsByID(ws, job.Namespace, job.ID) - if err != 
nil { - t.Fatalf("err: %v", err) - } - if len(allVersions) != 1 { - t.Fatalf("got %d; want 1", len(allVersions)) - } + must.NoError(t, err) + must.Len(t, 1, allVersions) if a := allVersions[0]; a.ID != job.ID || a.Version != 0 { t.Fatalf("bad: %v", a) @@ -2265,13 +2112,8 @@ func TestStateStore_UpsertJob_Job(t *testing.T) { // Test the looking up the job by version returns the same results vout, err := state.JobByIDAndVersion(ws, job.Namespace, job.ID, 0) - if err != nil { - t.Fatalf("err: %v", err) - } - - if !reflect.DeepEqual(out, vout) { - t.Fatalf("bad: %#v %#v", out, vout) - } + must.NoError(t, err) + must.Eq(t, out, vout) } func TestStateStore_UpdateUpsertJob_Job(t *testing.T) { @@ -2283,73 +2125,40 @@ func TestStateStore_UpdateUpsertJob_Job(t *testing.T) { // Create a watchset so we can test that upsert fires the watch ws := memdb.NewWatchSet() _, err := state.JobByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) job2 := mock.Job() job2.ID = job.ID job2.AllAtOnce = true err = state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() out, err := state.JobByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if !reflect.DeepEqual(job2, out) { - t.Fatalf("bad: %#v %#v", job2, out) - } - - if out.CreateIndex != 1000 { - t.Fatalf("bad: %#v", out) - } - if out.ModifyIndex != 1001 { - t.Fatalf("bad: %#v", out) - } - if out.Version != 1 { - t.Fatalf("bad: %#v", out) - } + must.NoError(t, err) + must.Eq(t, job2, out) + must.Eq(t, 1000, out.CreateIndex) + must.Eq(t, 1001, out.ModifyIndex) 
+ must.Eq(t, 1, out.Version) index, err := state.Index("jobs") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1001 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1001, index) // Test the looking up the job by version returns the same results vout, err := state.JobByIDAndVersion(ws, job.Namespace, job.ID, 1) - if err != nil { - t.Fatalf("err: %v", err) - } - - if !reflect.DeepEqual(out, vout) { - t.Fatalf("bad: %#v %#v", out, vout) - } + must.NoError(t, err) + must.Eq(t, out, vout) // Test that the job summary remains the same if the job is updated but // count remains same summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if summary == nil { - t.Fatalf("nil summary") - } + must.NoError(t, err) + must.NotNil(t, summary) if summary.JobID != job.ID { t.Fatalf("bad summary id: %v", summary.JobID) } @@ -2360,9 +2169,7 @@ func TestStateStore_UpdateUpsertJob_Job(t *testing.T) { // Check the job versions allVersions, err := state.JobVersionsByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) if len(allVersions) != 2 { t.Fatalf("got %d; want 1", len(allVersions)) } @@ -2374,9 +2181,7 @@ func TestStateStore_UpdateUpsertJob_Job(t *testing.T) { t.Fatalf("bad: %+v", a) } - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_UpdateUpsertJob_PeriodicJob(t *testing.T) { @@ -2388,46 +2193,32 @@ func TestStateStore_UpdateUpsertJob_PeriodicJob(t *testing.T) { // Create a watchset so we can test that upsert fires the watch ws := memdb.NewWatchSet() _, err := state.JobByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 
1000, nil, job)) // Create a child and an evaluation job2 := job.Copy() job2.Periodic = nil job2.ID = fmt.Sprintf("%v/%s-1490635020", job.ID, structs.PeriodicLaunchSuffix) err = state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) eval := mock.Eval() eval.JobID = job2.ID err = state.UpsertEvals(structs.MsgTypeTestSetup, 1002, []*structs.Evaluation{eval}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) job3 := job.Copy() job3.TaskGroups[0].Tasks[0].Name = "new name" err = state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, job3) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() out, err := state.JobByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) if s, e := out.Status, structs.JobStatusRunning; s != e { t.Fatalf("got status %v; want %v", s, e) @@ -2437,19 +2228,17 @@ func TestStateStore_UpdateUpsertJob_PeriodicJob(t *testing.T) { func TestStateStore_UpsertJob_BadNamespace(t *testing.T) { ci.Parallel(t) - - assert := assert.New(t) state := testStateStore(t) job := mock.Job() job.Namespace = "foo" err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) - assert.Contains(err.Error(), "nonexistent namespace") + must.ErrorContains(t, err, "nonexistent namespace") ws := memdb.NewWatchSet() out, err := state.JobByID(ws, job.Namespace, job.ID) - assert.Nil(err) - assert.Nil(out) + must.NoError(t, err) + test.Nil(t, out) } func TestStateStore_UpsertJob_NodePool(t *testing.T) { @@ -2511,40 +2300,22 @@ func TestStateStore_UpsertJob_ChildJob(t *testing.T) { parent := mock.Job() ws := memdb.NewWatchSet() _, err := state.JobByID(ws, parent.Namespace, parent.ID) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) - if err := 
state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, parent); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, parent)) child := mock.Job() child.Status = "" child.ParentID = parent.ID - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, child); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, child)) summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if summary == nil { - t.Fatalf("nil summary") - } - if summary.JobID != parent.ID { - t.Fatalf("bad summary id: %v", parent.ID) - } - if summary.Children == nil { - t.Fatalf("nil children summary") - } - if summary.Children.Pending != 1 || summary.Children.Running != 0 || summary.Children.Dead != 0 { - t.Fatalf("bad children summary: %v", summary.Children) - } - if !watchFired(ws) { - t.Fatalf("bad") - } + must.NoError(t, err) + must.NotNil(t, summary) + must.Eq(t, parent.ID, summary.JobID) + must.NotNil(t, summary.Children) + must.Eq(t, &structs.JobChildrenSummary{Pending: 1}, summary.Children) + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) } func TestStateStore_UpsertJob_submission(t *testing.T) { @@ -2672,17 +2443,11 @@ func TestStateStore_UpdateUpsertJob_JobVersion(t *testing.T) { // Create a watchset so we can test that upsert fires the watch ws := memdb.NewWatchSet() _, err := state.JobVersionsByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) var finalJob *structs.Job for i := 1; i < 300; i++ { @@ -2690,24 +2455,15 @@ func 
TestStateStore_UpdateUpsertJob_JobVersion(t *testing.T) { finalJob.ID = job.ID finalJob.Name = fmt.Sprintf("%d", i) err = state.UpsertJob(structs.MsgTypeTestSetup, uint64(1000+i), nil, finalJob) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) } ws = memdb.NewWatchSet() out, err := state.JobByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) + must.Eq(t, finalJob, out) - if !reflect.DeepEqual(finalJob, out) { - t.Fatalf("bad: %#v %#v", finalJob, out) - } - - if out.CreateIndex != 1000 { - t.Fatalf("bad: %#v", out) - } + must.Eq(t, 1000, out.CreateIndex) if out.ModifyIndex != 1299 { t.Fatalf("bad: %#v", out) } @@ -2716,18 +2472,14 @@ func TestStateStore_UpdateUpsertJob_JobVersion(t *testing.T) { } index, err := state.Index("job_version") - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) if index != 1299 { t.Fatalf("bad: %d", index) } // Check the job versions allVersions, err := state.JobVersionsByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) if len(allVersions) != structs.JobDefaultTrackedVersions { t.Fatalf("got %d; want %d", len(allVersions), structs.JobDefaultTrackedVersions) } @@ -2745,9 +2497,7 @@ func TestStateStore_UpdateUpsertJob_JobVersion(t *testing.T) { t.Fatalf("bad: %+v", a) } - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_DeleteJob_Job(t *testing.T) { @@ -2757,78 +2507,46 @@ func TestStateStore_DeleteJob_Job(t *testing.T) { job := mock.Job() err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) // Create a watchset so we can test that delete fires the watch ws := memdb.NewWatchSet() - if _, err := state.JobByID(ws, job.Namespace, job.ID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err = state.JobByID(ws, job.Namespace, 
job.ID) + must.NoError(t, err) err = state.DeleteJob(1001, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() out, err := state.JobByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if out != nil { - t.Fatalf("bad: %#v %#v", job, out) - } + must.NoError(t, err) + must.Nil(t, out) index, err := state.Index("jobs") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1001 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1001, index) summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) if summary != nil { t.Fatalf("expected summary to be nil, but got: %v", summary) } index, err = state.Index("job_summary") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1001 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1001, index) versions, err := state.JobVersionsByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if len(versions) != 0 { - t.Fatalf("expected no job versions") - } + must.NoError(t, err) + must.Len(t, 0, versions, must.Sprint("expected no job versions")) index, err = state.Index("job_summary") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1001 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1001, index) - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_DeleteJobTxn_BatchDeletes(t *testing.T) { @@ -2842,12 +2560,12 @@ func TestStateStore_DeleteJobTxn_BatchDeletes(t *testing.T) { stateIndex := uint64(1000) jobs := make([]*structs.Job, testJobCount) - for i := 0; i < testJobCount; i++ { + for i := range testJobCount { stateIndex++ job := 
mock.BatchJob() err := state.UpsertJob(structs.MsgTypeTestSetup, stateIndex, nil, job) - require.NoError(t, err) + must.NoError(t, err) jobs[i] = job @@ -2860,7 +2578,7 @@ func TestStateStore_DeleteJobTxn_BatchDeletes(t *testing.T) { "Version": fmt.Sprintf("%d", vi), } - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, stateIndex, nil, job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, stateIndex, nil, job)) } } @@ -2868,38 +2586,38 @@ func TestStateStore_DeleteJobTxn_BatchDeletes(t *testing.T) { // Check that jobs are present in DB job, err := state.JobByID(ws, jobs[0].Namespace, jobs[0].ID) - require.NoError(t, err) - require.Equal(t, jobs[0].ID, job.ID) + must.NoError(t, err) + must.Eq(t, jobs[0].ID, job.ID) jobVersions, err := state.JobVersionsByID(ws, jobs[0].Namespace, jobs[0].ID) - require.NoError(t, err) - require.Equal(t, jobVersionCount, len(jobVersions)) + must.NoError(t, err) + must.Eq(t, jobVersionCount, len(jobVersions)) // Actually delete const deletionIndex = uint64(10001) err = state.WithWriteTransaction(structs.MsgTypeTestSetup, deletionIndex, func(txn Txn) error { for i, job := range jobs { err := state.DeleteJobTxn(deletionIndex, job.Namespace, job.ID, txn) - require.NoError(t, err, "failed at %d %e", i, err) + must.NoError(t, err, must.Sprintf("failed at %d %e", i, err)) } return nil }) - assert.NoError(t, err) + must.NoError(t, err) - assert.True(t, watchFired(ws)) + test.True(t, watchFired(ws)) ws = memdb.NewWatchSet() out, err := state.JobByID(ws, jobs[0].Namespace, jobs[0].ID) - require.NoError(t, err) - require.Nil(t, out) + must.NoError(t, err) + must.Nil(t, out) jobVersions, err = state.JobVersionsByID(ws, jobs[0].Namespace, jobs[0].ID) - require.NoError(t, err) - require.Empty(t, jobVersions) + must.NoError(t, err) + must.Len(t, 0, jobVersions) index, err := state.Index("jobs") - require.NoError(t, err) - require.Equal(t, deletionIndex, index) + must.NoError(t, err) + must.Eq(t, deletionIndex, index) } // 
TestStatestore_JobVersionTag tests that job versions which are tagged @@ -3054,7 +2772,6 @@ func TestStateStore_DeleteJob_MultipleVersions(t *testing.T) { ci.Parallel(t) state := testStateStore(t) - assert := assert.New(t) // Create a job and mark it as stable job := mock.Job() @@ -3064,47 +2781,47 @@ func TestStateStore_DeleteJob_MultipleVersions(t *testing.T) { // Create a watchset so we can test that upsert fires the watch ws := memdb.NewWatchSet() _, err := state.JobVersionsByID(ws, job.Namespace, job.ID) - assert.Nil(err) - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) - assert.True(watchFired(ws)) + must.NoError(t, err) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) + test.True(t, watchFired(ws)) var finalJob *structs.Job for i := 1; i < 20; i++ { finalJob = mock.Job() finalJob.ID = job.ID finalJob.Priority = i - assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, uint64(1000+i), nil, finalJob)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, uint64(1000+i), nil, finalJob)) } - assert.Nil(state.DeleteJob(1020, job.Namespace, job.ID)) - assert.True(watchFired(ws)) + must.NoError(t, state.DeleteJob(1020, job.Namespace, job.ID)) + test.True(t, watchFired(ws)) ws = memdb.NewWatchSet() out, err := state.JobByID(ws, job.Namespace, job.ID) - assert.Nil(err) - assert.Nil(out) + must.NoError(t, err) + test.Nil(t, out) index, err := state.Index("jobs") - assert.Nil(err) - assert.EqualValues(1020, index) + must.NoError(t, err) + test.Eq(t, 1020, index) summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID) - assert.Nil(err) - assert.Nil(summary) + must.NoError(t, err) + test.Nil(t, summary) index, err = state.Index("job_version") - assert.Nil(err) - assert.EqualValues(1020, index) + must.NoError(t, err) + test.Eq(t, 1020, index) versions, err := state.JobVersionsByID(ws, job.Namespace, job.ID) - assert.Nil(err) - assert.Len(versions, 0) + must.NoError(t, err) + test.Len(t, 0, versions) index, 
err = state.Index("job_summary") - assert.Nil(err) - assert.EqualValues(1020, index) + must.NoError(t, err) + test.Eq(t, 1020, index) - assert.False(watchFired(ws)) + must.False(t, watchFired(ws), must.Sprint("expected watch not to fire")) } func TestStateStore_DeleteJob_ChildJob(t *testing.T) { @@ -3113,52 +2830,31 @@ func TestStateStore_DeleteJob_ChildJob(t *testing.T) { state := testStateStore(t) parent := mock.Job() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent)) child := mock.Job() child.Status = "" child.ParentID = parent.ID - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child)) // Create a watchset so we can test that delete fires the watch ws := memdb.NewWatchSet() - if _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) + must.NoError(t, err) - err := state.DeleteJob(1001, child.Namespace, child.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if !watchFired(ws) { - t.Fatalf("bad") - } + err = state.DeleteJob(1001, child.Namespace, child.ID) + must.NoError(t, err) + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if summary == nil { - t.Fatalf("nil summary") - } - if summary.JobID != parent.ID { - t.Fatalf("bad summary id: %v", parent.ID) - } - if summary.Children == nil { - t.Fatalf("nil children summary") - } - if summary.Children.Pending != 0 || summary.Children.Running != 0 || summary.Children.Dead != 1 { - t.Fatalf("bad children summary: %v", summary.Children) - } - if 
watchFired(ws) { - t.Fatalf("bad") - } + must.NoError(t, err) + must.NotNil(t, summary) + must.Eq(t, parent.ID, summary.JobID) + must.NotNil(t, summary.Children) + must.Eq(t, &structs.JobChildrenSummary{Dead: 1}, summary.Children) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_Jobs(t *testing.T) { @@ -3167,21 +2863,17 @@ func TestStateStore_Jobs(t *testing.T) { state := testStateStore(t) var jobs []*structs.Job - for i := 0; i < 10; i++ { + for i := range 10 { job := mock.Job() jobs = append(jobs, job) err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) } ws := memdb.NewWatchSet() iter, err := state.Jobs(ws, SortDefault) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) var out []*structs.Job for { @@ -3195,12 +2887,8 @@ func TestStateStore_Jobs(t *testing.T) { sort.Sort(JobIDSort(jobs)) sort.Sort(JobIDSort(out)) - if !reflect.DeepEqual(jobs, out) { - t.Fatalf("bad: %#v %#v", jobs, out) - } - if watchFired(ws) { - t.Fatalf("bad") - } + must.Eq(t, jobs, out) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_JobVersions(t *testing.T) { @@ -3209,21 +2897,17 @@ func TestStateStore_JobVersions(t *testing.T) { state := testStateStore(t) var jobs []*structs.Job - for i := 0; i < 10; i++ { + for i := range 10 { job := mock.Job() jobs = append(jobs, job) err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) } ws := memdb.NewWatchSet() iter, err := state.JobVersions(ws) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) var out []*structs.Job for { @@ -3237,12 +2921,8 @@ func TestStateStore_JobVersions(t *testing.T) { sort.Sort(JobIDSort(jobs)) sort.Sort(JobIDSort(out)) - if !reflect.DeepEqual(jobs, out) { - t.Fatalf("bad: %#v %#v", jobs, out) - } - 
if watchFired(ws) { - t.Fatalf("bad") - } + must.Eq(t, jobs, out) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_JobsByIDPrefix(t *testing.T) { @@ -3253,15 +2933,11 @@ func TestStateStore_JobsByIDPrefix(t *testing.T) { job.ID = "redis" err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) ws := memdb.NewWatchSet() iter, err := state.JobsByIDPrefix(ws, job.Namespace, job.ID, SortDefault) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) gatherJobs := func(iter memdb.ResultIterator) []*structs.Job { var jobs []*structs.Job @@ -3281,34 +2957,24 @@ func TestStateStore_JobsByIDPrefix(t *testing.T) { } iter, err = state.JobsByIDPrefix(ws, job.Namespace, "re", SortDefault) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) jobs = gatherJobs(iter) if len(jobs) != 1 { t.Fatalf("err: %v", err) } - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) job = mock.Job() job.ID = "riak" err = state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() iter, err = state.JobsByIDPrefix(ws, job.Namespace, "r", SortDefault) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) jobs = gatherJobs(iter) if len(jobs) != 2 { @@ -3316,17 +2982,13 @@ func TestStateStore_JobsByIDPrefix(t *testing.T) { } iter, err = state.JobsByIDPrefix(ws, job.Namespace, "ri", SortDefault) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) jobs = gatherJobs(iter) if len(jobs) != 1 { t.Fatalf("err: %v", err) } - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) 
} func TestStateStore_JobsByIDPrefix_Namespaces(t *testing.T) { @@ -3347,9 +3009,9 @@ func TestStateStore_JobsByIDPrefix_Namespaces(t *testing.T) { job1.Namespace = ns1.Name job2.Namespace = ns2.Name - require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job1)) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2)) + must.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job1)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2)) gatherJobs := func(iter memdb.ResultIterator) []*structs.Job { var jobs []*structs.Job @@ -3366,50 +3028,50 @@ func TestStateStore_JobsByIDPrefix_Namespaces(t *testing.T) { // Try full match ws := memdb.NewWatchSet() iter1, err := state.JobsByIDPrefix(ws, ns1.Name, jobID, SortDefault) - require.NoError(t, err) + must.NoError(t, err) iter2, err := state.JobsByIDPrefix(ws, ns2.Name, jobID, SortDefault) - require.NoError(t, err) + must.NoError(t, err) jobsNs1 := gatherJobs(iter1) - require.Len(t, jobsNs1, 1) + must.Len(t, 1, jobsNs1) jobsNs2 := gatherJobs(iter2) - require.Len(t, jobsNs2, 1) + must.Len(t, 1, jobsNs2) // Try prefix iter1, err = state.JobsByIDPrefix(ws, ns1.Name, "re", SortDefault) - require.NoError(t, err) + must.NoError(t, err) iter2, err = state.JobsByIDPrefix(ws, ns2.Name, "re", SortDefault) - require.NoError(t, err) + must.NoError(t, err) jobsNs1 = gatherJobs(iter1) jobsNs2 = gatherJobs(iter2) - require.Len(t, jobsNs1, 1) - require.Len(t, jobsNs2, 1) + must.Len(t, 1, jobsNs1) + must.Len(t, 1, jobsNs2) job3 := mock.Job() job3.ID = "riak" job3.Namespace = ns1.Name - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, job3)) - require.True(t, watchFired(ws)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, job3)) + must.True(t, watchFired(ws)) 
ws = memdb.NewWatchSet() iter1, err = state.JobsByIDPrefix(ws, ns1.Name, "r", SortDefault) - require.NoError(t, err) + must.NoError(t, err) iter2, err = state.JobsByIDPrefix(ws, ns2.Name, "r", SortDefault) - require.NoError(t, err) + must.NoError(t, err) jobsNs1 = gatherJobs(iter1) jobsNs2 = gatherJobs(iter2) - require.Len(t, jobsNs1, 2) - require.Len(t, jobsNs2, 1) + must.Len(t, 2, jobsNs1) + must.Len(t, 1, jobsNs2) iter1, err = state.JobsByIDPrefix(ws, ns1.Name, "ri", SortDefault) - require.NoError(t, err) + must.NoError(t, err) jobsNs1 = gatherJobs(iter1) - require.Len(t, jobsNs1, 1) - require.False(t, watchFired(ws)) + must.Len(t, 1, jobsNs1) + must.False(t, watchFired(ws)) } func TestStateStore_JobsByNamespace(t *testing.T) { @@ -3430,27 +3092,27 @@ func TestStateStore_JobsByNamespace(t *testing.T) { job3.Namespace = ns2.Name job4.Namespace = ns2.Name - require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + must.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) // Create watchsets so we can test that update fires the watch watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()} _, err := state.JobsByNamespace(watches[0], ns1.Name, SortDefault) - require.NoError(t, err) + must.NoError(t, err) _, err = state.JobsByNamespace(watches[1], ns2.Name, SortDefault) - require.NoError(t, err) + must.NoError(t, err) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job1)) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1002, nil, job2)) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, job3)) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1004, nil, job4)) - require.True(t, watchFired(watches[0])) - require.True(t, watchFired(watches[1])) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job1)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1002, nil, job2)) + must.NoError(t, 
state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, job3)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1004, nil, job4)) + must.True(t, watchFired(watches[0])) + must.True(t, watchFired(watches[1])) ws := memdb.NewWatchSet() iter1, err := state.JobsByNamespace(ws, ns1.Name, SortDefault) - require.NoError(t, err) + must.NoError(t, err) iter2, err := state.JobsByNamespace(ws, ns2.Name, SortDefault) - require.NoError(t, err) + must.NoError(t, err) var out1 []*structs.Job for { @@ -3470,20 +3132,20 @@ func TestStateStore_JobsByNamespace(t *testing.T) { out2 = append(out2, raw.(*structs.Job)) } - require.Len(t, out1, 2) - require.Len(t, out2, 2) + must.Len(t, 2, out1) + must.Len(t, 2, out2) for _, job := range out1 { - require.Equal(t, ns1.Name, job.Namespace) + must.Eq(t, ns1.Name, job.Namespace) } for _, job := range out2 { - require.Equal(t, ns2.Name, job.Namespace) + must.Eq(t, ns2.Name, job.Namespace) } index, err := state.Index("jobs") - require.NoError(t, err) - require.EqualValues(t, 1004, index) - require.False(t, watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 1004, index) + must.False(t, watchFired(ws)) } func TestStateStore_JobsByPeriodic(t *testing.T) { @@ -3492,31 +3154,25 @@ func TestStateStore_JobsByPeriodic(t *testing.T) { state := testStateStore(t) var periodic, nonPeriodic []*structs.Job - for i := 0; i < 10; i++ { + for i := range 10 { job := mock.Job() nonPeriodic = append(nonPeriodic, job) err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) } - for i := 0; i < 10; i++ { + for i := range 10 { job := mock.PeriodicJob() periodic = append(periodic, job) err := state.UpsertJob(structs.MsgTypeTestSetup, 2000+uint64(i), nil, job) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) } ws := memdb.NewWatchSet() iter, err := state.JobsByPeriodic(ws, true) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) 
var outPeriodic []*structs.Job for { @@ -3528,9 +3184,7 @@ func TestStateStore_JobsByPeriodic(t *testing.T) { } iter, err = state.JobsByPeriodic(ws, false) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) var outNonPeriodic []*structs.Job for { @@ -3546,16 +3200,9 @@ func TestStateStore_JobsByPeriodic(t *testing.T) { sort.Sort(JobIDSort(outPeriodic)) sort.Sort(JobIDSort(outNonPeriodic)) - if !reflect.DeepEqual(periodic, outPeriodic) { - t.Fatalf("bad: %#v %#v", periodic, outPeriodic) - } - - if !reflect.DeepEqual(nonPeriodic, outNonPeriodic) { - t.Fatalf("bad: %#v %#v", nonPeriodic, outNonPeriodic) - } - if watchFired(ws) { - t.Fatalf("bad") - } + must.Eq(t, periodic, outPeriodic) + must.Eq(t, nonPeriodic, outNonPeriodic) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_JobsByScheduler(t *testing.T) { @@ -3565,32 +3212,26 @@ func TestStateStore_JobsByScheduler(t *testing.T) { var serviceJobs []*structs.Job var sysJobs []*structs.Job - for i := 0; i < 10; i++ { + for i := range 10 { job := mock.Job() serviceJobs = append(serviceJobs, job) err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) } - for i := 0; i < 10; i++ { + for i := range 10 { job := mock.SystemJob() job.Status = structs.JobStatusRunning sysJobs = append(sysJobs, job) err := state.UpsertJob(structs.MsgTypeTestSetup, 2000+uint64(i), nil, job) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) } ws := memdb.NewWatchSet() iter, err := state.JobsByScheduler(ws, "service") - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) var outService []*structs.Job for { @@ -3602,9 +3243,7 @@ func TestStateStore_JobsByScheduler(t *testing.T) { } iter, err = state.JobsByScheduler(ws, "system") - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) var outSystem []*structs.Job for { @@ -3620,16 
+3259,9 @@ func TestStateStore_JobsByScheduler(t *testing.T) { sort.Sort(JobIDSort(outService)) sort.Sort(JobIDSort(outSystem)) - if !reflect.DeepEqual(serviceJobs, outService) { - t.Fatalf("bad: %#v %#v", serviceJobs, outService) - } - - if !reflect.DeepEqual(sysJobs, outSystem) { - t.Fatalf("bad: %#v %#v", sysJobs, outSystem) - } - if watchFired(ws) { - t.Fatalf("bad") - } + must.Eq(t, serviceJobs, outService) + must.Eq(t, sysJobs, outSystem) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_JobsByGC(t *testing.T) { @@ -3638,7 +3270,7 @@ func TestStateStore_JobsByGC(t *testing.T) { state := testStateStore(t) gc, nonGc := make(map[string]struct{}), make(map[string]struct{}) - for i := 0; i < 20; i++ { + for i := range 20 { var job *structs.Job if i%2 == 0 { job = mock.Job() @@ -3647,9 +3279,7 @@ func TestStateStore_JobsByGC(t *testing.T) { } nonGc[job.ID] = struct{}{} - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job)) } for i := 0; i < 20; i += 2 { @@ -3659,26 +3289,20 @@ func TestStateStore_JobsByGC(t *testing.T) { job.ModifyIndex = idx gc[job.ID] = struct{}{} - if err := state.UpsertJob(structs.MsgTypeTestSetup, idx, nil, job); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, idx, nil, job)) // Create an eval for it eval := mock.Eval() eval.JobID = job.ID eval.JobModifyIndex = job.ModifyIndex eval.Status = structs.EvalStatusComplete - if err := state.UpsertEvals(structs.MsgTypeTestSetup, idx, []*structs.Evaluation{eval}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, idx, []*structs.Evaluation{eval})) } ws := memdb.NewWatchSet() iter, err := state.JobsByGC(ws, true) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) outGc 
:= make(map[string]struct{}) for i := iter.Next(); i != nil; i = iter.Next() { @@ -3687,9 +3311,7 @@ func TestStateStore_JobsByGC(t *testing.T) { } iter, err = state.JobsByGC(ws, false) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) outNonGc := make(map[string]struct{}) for i := iter.Next(); i != nil; i = iter.Next() { @@ -3697,16 +3319,9 @@ func TestStateStore_JobsByGC(t *testing.T) { outNonGc[j.ID] = struct{}{} } - if !reflect.DeepEqual(gc, outGc) { - t.Fatalf("bad: %#v %#v", gc, outGc) - } - - if !reflect.DeepEqual(nonGc, outNonGc) { - t.Fatalf("bad: %#v %#v", nonGc, outNonGc) - } - if watchFired(ws) { - t.Fatalf("bad") - } + must.Eq(t, gc, outGc) + must.Eq(t, nonGc, outNonGc) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_UpsertPeriodicLaunch(t *testing.T) { @@ -3722,46 +3337,28 @@ func TestStateStore_UpsertPeriodicLaunch(t *testing.T) { // Create a watchset so we can test that upsert fires the watch ws := memdb.NewWatchSet() - if _, err := state.PeriodicLaunchByID(ws, job.Namespace, launch.ID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err := state.PeriodicLaunchByID(ws, job.Namespace, launch.ID) + must.NoError(t, err) - err := state.UpsertPeriodicLaunch(1000, launch) - if err != nil { - t.Fatalf("err: %v", err) - } + err = state.UpsertPeriodicLaunch(1000, launch) + must.NoError(t, err) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() out, err := state.PeriodicLaunchByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if out.CreateIndex != 1000 { - t.Fatalf("bad: %#v", out) - } + must.NoError(t, err) + must.Eq(t, 1000, out.CreateIndex) if out.ModifyIndex != 1000 { t.Fatalf("bad: %#v", out) } - if !reflect.DeepEqual(launch, out) { - t.Fatalf("bad: %#v %#v", job, out) - } - + must.Eq(t, launch, out) index, err := state.Index("periodic_launch") - if err != nil { 
- t.Fatalf("err: %v", err) - } - if index != 1000 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1000, index) - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_UpdateUpsertPeriodicLaunch(t *testing.T) { @@ -3776,15 +3373,12 @@ func TestStateStore_UpdateUpsertPeriodicLaunch(t *testing.T) { } err := state.UpsertPeriodicLaunch(1000, launch) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) // Create a watchset so we can test that upsert fires the watch ws := memdb.NewWatchSet() - if _, err := state.PeriodicLaunchByID(ws, job.Namespace, launch.ID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err = state.PeriodicLaunchByID(ws, job.Namespace, launch.ID) + must.NoError(t, err) launch2 := &structs.PeriodicLaunch{ ID: job.ID, @@ -3792,41 +3386,22 @@ func TestStateStore_UpdateUpsertPeriodicLaunch(t *testing.T) { Launch: launch.Launch.Add(1 * time.Second), } err = state.UpsertPeriodicLaunch(1001, launch2) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() out, err := state.PeriodicLaunchByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if out.CreateIndex != 1000 { - t.Fatalf("bad: %#v", out) - } - if out.ModifyIndex != 1001 { - t.Fatalf("bad: %#v", out) - } - - if !reflect.DeepEqual(launch2, out) { - t.Fatalf("bad: %#v %#v", launch2, out) - } + must.NoError(t, err) + must.Eq(t, 1000, out.CreateIndex) + must.Eq(t, 1001, out.ModifyIndex) + must.Eq(t, launch2, out) index, err := state.Index("periodic_launch") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1001 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1001, index) - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), 
must.Sprint("watch should not have fired")) } func TestStateStore_DeletePeriodicLaunch(t *testing.T) { @@ -3841,46 +3416,28 @@ func TestStateStore_DeletePeriodicLaunch(t *testing.T) { } err := state.UpsertPeriodicLaunch(1000, launch) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) // Create a watchset so we can test that delete fires the watch ws := memdb.NewWatchSet() - if _, err := state.PeriodicLaunchByID(ws, job.Namespace, launch.ID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err = state.PeriodicLaunchByID(ws, job.Namespace, launch.ID) + must.NoError(t, err) err = state.DeletePeriodicLaunch(1001, launch.Namespace, launch.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() out, err := state.PeriodicLaunchByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if out != nil { - t.Fatalf("bad: %#v %#v", job, out) - } + must.NoError(t, err) + must.Nil(t, out) index, err := state.Index("periodic_launch") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1001 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1001, index) - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_PeriodicLaunches(t *testing.T) { @@ -3889,7 +3446,7 @@ func TestStateStore_PeriodicLaunches(t *testing.T) { state := testStateStore(t) var launches []*structs.PeriodicLaunch - for i := 0; i < 10; i++ { + for i := range 10 { job := mock.Job() launch := &structs.PeriodicLaunch{ ID: job.ID, @@ -3899,16 +3456,12 @@ func TestStateStore_PeriodicLaunches(t *testing.T) { launches = append(launches, launch) err := state.UpsertPeriodicLaunch(1000+uint64(i), launch) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) } ws := memdb.NewWatchSet() 
iter, err := state.PeriodicLaunches(ws) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) out := make(map[string]*structs.PeriodicLaunch, 10) for { @@ -3926,24 +3479,13 @@ func TestStateStore_PeriodicLaunches(t *testing.T) { for _, launch := range launches { l, ok := out[launch.ID] - if !ok { - t.Fatalf("bad %v", launch.ID) - } - - if !reflect.DeepEqual(launch, l) { - t.Fatalf("bad: %#v %#v", launch, l) - } - + must.True(t, ok, must.Sprintf("bad %v", launch.ID)) + must.Eq(t, launch, l) delete(out, launch.ID) } - if len(out) != 0 { - t.Fatalf("leftover: %#v", out) - } - - if watchFired(ws) { - t.Fatalf("bad") - } + must.MapLen(t, 0, out, must.Sprintf("leftover: %#v", out)) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } // TestStateStore_CSIVolume checks register, list and deregister for csi_volumes @@ -3987,14 +3529,14 @@ func TestStateStore_CSIVolume(t *testing.T) { index++ err := state.UpsertNode(structs.MsgTypeTestSetup, index, node) - require.NoError(t, err) + must.NoError(t, err) defer state.DeleteNode(structs.MsgTypeTestSetup, 9999, []string{pluginID}) now := time.Now().UnixNano() index++ err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc}) - require.NoError(t, err) + must.NoError(t, err) ns := structs.DefaultNamespace @@ -4025,22 +3567,22 @@ func TestStateStore_CSIVolume(t *testing.T) { index++ err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0, v1}) - require.NoError(t, err) + must.NoError(t, err) // volume registration is idempotent, unless identies are changed index++ err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0, v1}) - require.NoError(t, err) + must.NoError(t, err) index++ v2 := v0.Copy() v2.PluginID = "new-id" err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v2}) - require.Error(t, err, fmt.Sprintf("volume exists: %s", v0.ID)) + must.EqError(t, err, fmt.Sprintf("volume identity cannot be updated: %s", v0.ID)) ws := memdb.NewWatchSet() 
iter, err := state.CSIVolumesByNamespace(ws, ns, "") - require.NoError(t, err) + must.NoError(t, err) slurp := func(iter memdb.ResultIterator) (vs []*structs.CSIVolume) { for { @@ -4055,26 +3597,26 @@ func TestStateStore_CSIVolume(t *testing.T) { } vs := slurp(iter) - require.Equal(t, 2, len(vs)) + must.Len(t, 2, vs) ws = memdb.NewWatchSet() iter, err = state.CSIVolumesByPluginID(ws, ns, "", "minnie") - require.NoError(t, err) + must.NoError(t, err) vs = slurp(iter) - require.Equal(t, 1, len(vs)) + must.Eq(t, 1, len(vs)) ws = memdb.NewWatchSet() iter, err = state.CSIVolumesByNodeID(ws, "", node.ID) - require.NoError(t, err) + must.NoError(t, err) vs = slurp(iter) - require.Equal(t, 1, len(vs)) + must.Eq(t, 1, len(vs)) // Allocs a0 := mock.Alloc() a1 := mock.Alloc() index++ err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{a0, a1}) - require.NoError(t, err) + must.NoError(t, err) // Claims r := structs.CSIVolumeClaimRead @@ -4093,42 +3635,42 @@ func TestStateStore_CSIVolume(t *testing.T) { index++ err = state.CSIVolumeClaim(index, now, ns, vol0, claim0) - require.NoError(t, err) + must.NoError(t, err) index++ err = state.CSIVolumeClaim(index, now, ns, vol0, claim1) - require.NoError(t, err) + must.NoError(t, err) ws = memdb.NewWatchSet() iter, err = state.CSIVolumesByPluginID(ws, ns, "", "minnie") - require.NoError(t, err) + must.NoError(t, err) vs = slurp(iter) - require.False(t, vs[0].HasFreeWriteClaims()) + must.False(t, vs[0].HasFreeWriteClaims()) claim2 := new(structs.CSIVolumeClaim) *claim2 = *claim0 claim2.Mode = u err = state.CSIVolumeClaim(2, now, ns, vol0, claim2) - require.NoError(t, err) + must.NoError(t, err) ws = memdb.NewWatchSet() iter, err = state.CSIVolumesByPluginID(ws, ns, "", "minnie") - require.NoError(t, err) + must.NoError(t, err) vs = slurp(iter) - require.True(t, vs[0].ReadSchedulable()) + must.True(t, vs[0].ReadSchedulable()) // deregistration is an error when the volume is in use index++ err = 
state.CSIVolumeDeregister(index, ns, []string{vol0}, false) - require.Error(t, err, "volume deregistered while in use") + must.Error(t, err, must.Sprint("volume deregistered while in use")) // even if forced, because we have a non-terminal claim index++ err = state.CSIVolumeDeregister(index, ns, []string{vol0}, true) - require.Error(t, err, "volume force deregistered while in use") + must.Error(t, err, must.Sprint("volume force deregistered while in use")) // we use the ID, not a prefix index++ err = state.CSIVolumeDeregister(index, ns, []string{"fo"}, true) - require.Error(t, err, "volume deregistered by prefix") + must.Error(t, err, must.Sprint("volume deregistered by prefix")) // release claims to unblock deregister index++ @@ -4136,29 +3678,29 @@ func TestStateStore_CSIVolume(t *testing.T) { *claim3 = *claim2 claim3.State = structs.CSIVolumeClaimStateReadyToFree err = state.CSIVolumeClaim(index, now, ns, vol0, claim3) - require.NoError(t, err) + must.NoError(t, err) index++ claim1.Mode = u claim1.State = structs.CSIVolumeClaimStateReadyToFree err = state.CSIVolumeClaim(index, now, ns, vol0, claim1) - require.NoError(t, err) + must.NoError(t, err) index++ err = state.CSIVolumeDeregister(index, ns, []string{vol0}, false) - require.NoError(t, err) + must.NoError(t, err) // List, now omitting the deregistered volume ws = memdb.NewWatchSet() iter, err = state.CSIVolumesByPluginID(ws, ns, "", "minnie") - require.NoError(t, err) + must.NoError(t, err) vs = slurp(iter) - require.Equal(t, 0, len(vs)) + must.Eq(t, 0, len(vs)) ws = memdb.NewWatchSet() iter, err = state.CSIVolumesByNamespace(ws, ns, "") - require.NoError(t, err) + must.NoError(t, err) vs = slurp(iter) - require.Equal(t, 1, len(vs)) + must.Eq(t, 1, len(vs)) } func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { @@ -4518,14 +4060,10 @@ func TestStateStore_Indexes(t *testing.T) { node := mock.Node() err := state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) - if err != nil { - t.Fatalf("err: %v", err) - 
} + must.NoError(t, err) iter, err := state.Indexes() - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) var out []*IndexEntry for { @@ -4561,19 +4099,13 @@ func TestStateStore_LatestIndex(t *testing.T) { state := testStateStore(t) - if err := state.UpsertNode(structs.MsgTypeTestSetup, 1000, mock.Node()); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, mock.Node())) exp := uint64(2000) - if err := state.UpsertJob(structs.MsgTypeTestSetup, exp, nil, mock.Job()); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, exp, nil, mock.Job())) latest, err := state.LatestIndex() - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) if latest != exp { t.Fatalf("LatestIndex() returned %d; want %d", latest, exp) @@ -4588,40 +4120,25 @@ func TestStateStore_UpsertEvals_Eval(t *testing.T) { // Create a watchset so we can test that upsert fires the watch ws := memdb.NewWatchSet() - if _, err := state.EvalByID(ws, eval.ID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err := state.EvalByID(ws, eval.ID) + must.NoError(t, err) - err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}) - if err != nil { - t.Fatalf("err: %v", err) - } + err = state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}) + must.NoError(t, err) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() out, err := state.EvalByID(ws, eval.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if !reflect.DeepEqual(eval, out) { - t.Fatalf("bad: %#v %#v", eval, out) - } + must.Eq(t, eval, out) index, err := state.Index("evals") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1000 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1000, index) - if watchFired(ws) { - 
t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_UpsertEvals_CancelBlocked(t *testing.T) { @@ -4638,9 +4155,7 @@ func TestStateStore_UpsertEvals_CancelBlocked(t *testing.T) { b2.Status = structs.EvalStatusBlocked err := state.UpsertEvals(structs.MsgTypeTestSetup, 999, []*structs.Evaluation{b1, b2}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) // Create one complete and successful eval for the job eval := mock.Eval() @@ -4650,49 +4165,31 @@ func TestStateStore_UpsertEvals_CancelBlocked(t *testing.T) { // Create a watchset so we can test that the upsert of the complete eval // fires the watch ws := memdb.NewWatchSet() - if _, err := state.EvalByID(ws, b1.ID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.EvalByID(ws, b2.ID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err = state.EvalByID(ws, b1.ID) + must.NoError(t, err) + _, err = state.EvalByID(ws, b2.ID) + must.NoError(t, err) - if err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() out, err := state.EvalByID(ws, eval.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if !reflect.DeepEqual(eval, out) { - t.Fatalf("bad: %#v %#v", eval, out) - } + must.Eq(t, eval, out) index, err := state.Index("evals") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1000 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1000, index) // Get b1/b2 and check they are cancelled out1, err := state.EvalByID(ws, b1.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) out2, err := state.EvalByID(ws, b2.ID) - if err != nil { - 
t.Fatalf("err: %v", err) - } + must.NoError(t, err) if out1.Status != structs.EvalStatusCancelled || out2.Status != structs.EvalStatusCancelled { t.Fatalf("bad: %#v %#v", out1, out2) @@ -4706,9 +4203,7 @@ func TestStateStore_UpsertEvals_CancelBlocked(t *testing.T) { t.Fatalf("bad modify time %#v %#v", out1, out2) } - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_UpsertEvals_Namespace(t *testing.T) { @@ -4729,24 +4224,24 @@ func TestStateStore_UpsertEvals_Namespace(t *testing.T) { eval3.Namespace = ns2.Name eval4.Namespace = ns2.Name - require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + must.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) // Create watchsets so we can test that update fires the watch watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()} _, err := state.EvalsByNamespace(watches[0], ns1.Name) - require.NoError(t, err) + must.NoError(t, err) _, err = state.EvalsByNamespace(watches[1], ns2.Name) - require.NoError(t, err) + must.NoError(t, err) - require.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval1, eval2, eval3, eval4})) - require.True(t, watchFired(watches[0])) - require.True(t, watchFired(watches[1])) + must.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval1, eval2, eval3, eval4})) + must.True(t, watchFired(watches[0])) + must.True(t, watchFired(watches[1])) ws := memdb.NewWatchSet() iter1, err := state.EvalsByNamespace(ws, ns1.Name) - require.NoError(t, err) + must.NoError(t, err) iter2, err := state.EvalsByNamespace(ws, ns2.Name) - require.NoError(t, err) + must.NoError(t, err) var out1 []*structs.Evaluation for { @@ -4766,20 +4261,20 @@ func TestStateStore_UpsertEvals_Namespace(t *testing.T) { out2 = append(out2, raw.(*structs.Evaluation)) } - require.Len(t, out1, 2) - require.Len(t, out2, 2) + must.Len(t, 
2, out1) + must.Len(t, 2, out2) for _, eval := range out1 { - require.Equal(t, ns1.Name, eval.Namespace) + must.Eq(t, ns1.Name, eval.Namespace) } for _, eval := range out2 { - require.Equal(t, ns2.Name, eval.Namespace) + must.Eq(t, ns2.Name, eval.Namespace) } index, err := state.Index("evals") - require.NoError(t, err) - require.EqualValues(t, 1001, index) - require.False(t, watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 1001, index) + must.False(t, watchFired(ws)) } func TestStateStore_Update_UpsertEvals_Eval(t *testing.T) { @@ -4789,64 +4284,40 @@ func TestStateStore_Update_UpsertEvals_Eval(t *testing.T) { eval := mock.Eval() err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) // Create a watchset so we can test that delete fires the watch ws := memdb.NewWatchSet() ws2 := memdb.NewWatchSet() - if _, err := state.EvalByID(ws, eval.ID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err = state.EvalByID(ws, eval.ID) + must.NoError(t, err) - if _, err := state.EvalsByJob(ws2, eval.Namespace, eval.JobID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err = state.EvalsByJob(ws2, eval.Namespace, eval.JobID) + must.NoError(t, err) eval2 := mock.Eval() eval2.ID = eval.ID eval2.JobID = eval.JobID err = state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval2}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if !watchFired(ws) { - t.Fatalf("bad") - } - if !watchFired(ws2) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) + must.True(t, watchFired(ws2)) ws = memdb.NewWatchSet() out, err := state.EvalByID(ws, eval.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if !reflect.DeepEqual(eval2, out) { - t.Fatalf("bad: %#v %#v", eval2, out) - } + must.Eq(t, eval2, out) - if out.CreateIndex != 1000 { - t.Fatalf("bad: %#v", out) - } - if 
out.ModifyIndex != 1001 { - t.Fatalf("bad: %#v", out) - } + must.Eq(t, 1000, out.CreateIndex) + must.Eq(t, 1001, out.ModifyIndex) index, err := state.Index("evals") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1001 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1001, index) - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_UpsertEvals_Eval_ChildJob(t *testing.T) { @@ -4855,17 +4326,13 @@ func TestStateStore_UpsertEvals_Eval_ChildJob(t *testing.T) { state := testStateStore(t) parent := mock.Job() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent)) child := mock.Job() child.Status = "" child.ParentID = parent.ID - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child)) eval := mock.Eval() eval.Status = structs.EvalStatusComplete @@ -4876,69 +4343,36 @@ func TestStateStore_UpsertEvals_Eval_ChildJob(t *testing.T) { ws := memdb.NewWatchSet() ws2 := memdb.NewWatchSet() ws3 := memdb.NewWatchSet() - if _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.EvalByID(ws2, eval.ID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.EvalsByJob(ws3, eval.Namespace, eval.JobID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) + must.NoError(t, err) + _, err = state.EvalByID(ws2, eval.ID) + must.NoError(t, err) + _, err = state.EvalsByJob(ws3, eval.Namespace, eval.JobID) + must.NoError(t, err) - err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}) - if err != nil { - t.Fatalf("err: %v", err) 
- } + err = state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}) + must.NoError(t, err) - if !watchFired(ws) { - t.Fatalf("bad") - } - if !watchFired(ws2) { - t.Fatalf("bad") - } - if !watchFired(ws3) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) + must.True(t, watchFired(ws2), must.Sprint("expected watch to fire")) + must.True(t, watchFired(ws3), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() out, err := state.EvalByID(ws, eval.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if !reflect.DeepEqual(eval, out) { - t.Fatalf("bad: %#v %#v", eval, out) - } + must.NoError(t, err) + must.Eq(t, eval, out) index, err := state.Index("evals") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1000 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1000, index) summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if summary == nil { - t.Fatalf("nil summary") - } - if summary.JobID != parent.ID { - t.Fatalf("bad summary id: %v", parent.ID) - } - if summary.Children == nil { - t.Fatalf("nil children summary") - } - if summary.Children.Pending != 0 || summary.Children.Running != 0 || summary.Children.Dead != 1 { - t.Fatalf("bad children summary: %v", summary.Children) - } - - if watchFired(ws) { - t.Fatalf("bad") - } + must.NoError(t, err) + must.NotNil(t, summary) + must.Eq(t, parent.ID, summary.JobID, must.Sprint("bad summary id")) + must.NotNil(t, summary.Children) + must.Eq(t, &structs.JobChildrenSummary{Dead: 1}, summary.Children) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_DeleteEval_Eval(t *testing.T) { @@ -4952,140 +4386,90 @@ func TestStateStore_DeleteEval_Eval(t *testing.T) { // Create watchsets so we can test that upsert fires the watch watches := make([]memdb.WatchSet, 12) - for i := 0; i < 12; i++ { + for i := range 
12 { watches[i] = memdb.NewWatchSet() } - if _, err := state.EvalByID(watches[0], eval1.ID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.EvalByID(watches[1], eval2.ID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.EvalsByJob(watches[2], eval1.Namespace, eval1.JobID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.EvalsByJob(watches[3], eval2.Namespace, eval2.JobID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.AllocByID(watches[4], alloc1.ID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.AllocByID(watches[5], alloc2.ID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.AllocsByEval(watches[6], alloc1.EvalID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.AllocsByEval(watches[7], alloc2.EvalID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.AllocsByJob(watches[8], alloc1.Namespace, alloc1.JobID, false); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.AllocsByJob(watches[9], alloc2.Namespace, alloc2.JobID, false); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.AllocsByNode(watches[10], alloc1.NodeID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.AllocsByNode(watches[11], alloc2.NodeID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err := state.EvalByID(watches[0], eval1.ID) + must.NoError(t, err) + _, err = state.EvalByID(watches[1], eval2.ID) + must.NoError(t, err) + _, err = state.EvalsByJob(watches[2], eval1.Namespace, eval1.JobID) + must.NoError(t, err) + _, err = state.EvalsByJob(watches[3], eval2.Namespace, eval2.JobID) + must.NoError(t, err) + _, err = state.AllocByID(watches[4], alloc1.ID) + must.NoError(t, err) + _, err = state.AllocByID(watches[5], alloc2.ID) + must.NoError(t, err) + _, err = state.AllocsByEval(watches[6], alloc1.EvalID) + must.NoError(t, err) + _, err = state.AllocsByEval(watches[7], alloc2.EvalID) + must.NoError(t, err) + _, err = 
state.AllocsByJob(watches[8], alloc1.Namespace, alloc1.JobID, false) + must.NoError(t, err) + _, err = state.AllocsByJob(watches[9], alloc2.Namespace, alloc2.JobID, false) + must.NoError(t, err) + _, err = state.AllocsByNode(watches[10], alloc1.NodeID) + must.NoError(t, err) + _, err = state.AllocsByNode(watches[11], alloc2.NodeID) + must.NoError(t, err) state.UpsertJobSummary(900, mock.JobSummary(eval1.JobID)) state.UpsertJobSummary(901, mock.JobSummary(eval2.JobID)) state.UpsertJobSummary(902, mock.JobSummary(alloc1.JobID)) state.UpsertJobSummary(903, mock.JobSummary(alloc2.JobID)) - err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1, eval2}) - if err != nil { - t.Fatalf("err: %v", err) - } + err = state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1, eval2}) + must.NoError(t, err) err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1, alloc2}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) err = state.DeleteEval(1002, []string{eval1.ID, eval2.ID}, []string{alloc1.ID, alloc2.ID}, false) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - for i, ws := range watches { - if !watchFired(ws) { - t.Fatalf("bad %d", i) - } + for _, ws := range watches { + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) } ws := memdb.NewWatchSet() out, err := state.EvalByID(ws, eval1.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if out != nil { - t.Fatalf("bad: %#v %#v", eval1, out) - } + must.NoError(t, err) + must.Nil(t, out) out, err = state.EvalByID(ws, eval2.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if out != nil { - t.Fatalf("bad: %#v %#v", eval1, out) - } + must.NoError(t, err) + must.Nil(t, out) outA, err := state.AllocByID(ws, alloc1.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if out != nil { - t.Fatalf("bad: %#v %#v", alloc1, outA) - } + must.NoError(t, err) + must.Nil(t, outA) outA, 
err = state.AllocByID(ws, alloc2.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if out != nil { - t.Fatalf("bad: %#v %#v", alloc1, outA) - } + must.NoError(t, err) + must.Nil(t, outA) index, err := state.Index("evals") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1002 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1002, index) index, err = state.Index("allocs") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1002 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1002, index) - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) // Call the eval delete function with zero length eval and alloc ID arrays. // This should result in the table indexes both staying the same, rather // than updating without cause. - require.NoError(t, state.DeleteEval(1010, []string{}, []string{}, false)) + must.NoError(t, state.DeleteEval(1010, []string{}, []string{}, false)) allocsIndex, err := state.Index("allocs") - require.NoError(t, err) - require.Equal(t, uint64(1002), allocsIndex) + must.NoError(t, err) + must.Eq(t, uint64(1002), allocsIndex) evalsIndex, err := state.Index("evals") - require.NoError(t, err) - require.Equal(t, uint64(1002), evalsIndex) + must.NoError(t, err) + must.Eq(t, uint64(1002), evalsIndex) } // This tests the evalDelete boolean by deleting a Pending eval and Pending Alloc. 
@@ -5095,17 +4479,13 @@ func TestStateStore_DeleteEval_ChildJob(t *testing.T) { state := testStateStore(t) parent := mock.Job() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent)) child := mock.Job() child.Status = "" child.ParentID = parent.ID - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child)) eval1 := mock.Eval() eval1.JobID = child.ID @@ -5113,51 +4493,29 @@ func TestStateStore_DeleteEval_ChildJob(t *testing.T) { alloc1.JobID = child.ID err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) // Create watchsets so we can test that delete fires the watch ws := memdb.NewWatchSet() - if _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err = state.JobSummaryByID(ws, parent.Namespace, parent.ID) + must.NoError(t, err) err = state.DeleteEval(1002, []string{eval1.ID}, []string{alloc1.ID}, false) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - if summary == nil { - t.Fatalf("nil summary") - } - if summary.JobID != parent.ID { - t.Fatalf("bad summary id: %v", parent.ID) - } - if summary.Children == nil { - t.Fatalf("nil children summary") - } - if summary.Children.Pending != 0 || 
summary.Children.Running != 0 || summary.Children.Dead != 1 { - t.Fatalf("bad children summary: %v", summary.Children) - } - - if watchFired(ws) { - t.Fatalf("bad") - } + must.NoError(t, err) + must.NotNil(t, summary) + must.Eq(t, parent.ID, summary.JobID) + must.NotNil(t, summary.Children) + must.Eq(t, &structs.JobChildrenSummary{Dead: 1}, summary.Children) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_DeleteEval_UserInitiated(t *testing.T) { @@ -5168,35 +4526,35 @@ func TestStateStore_DeleteEval_UserInitiated(t *testing.T) { // Upsert a scheduler config object, so we have something to check and // modify. schedulerConfig := structs.SchedulerConfiguration{PauseEvalBroker: false} - require.NoError(t, testState.SchedulerSetConfig(10, &schedulerConfig)) + must.NoError(t, testState.SchedulerSetConfig(10, &schedulerConfig)) // Generate some mock evals and upsert these into state. mockEval1 := mock.Eval() mockEval2 := mock.Eval() - require.NoError(t, testState.UpsertEvals( + must.NoError(t, testState.UpsertEvals( structs.MsgTypeTestSetup, 20, []*structs.Evaluation{mockEval1, mockEval2})) mockEvalIDs := []string{mockEval1.ID, mockEval2.ID} // Try and delete the evals without pausing the eval broker. err := testState.DeleteEval(30, mockEvalIDs, []string{}, true) - require.ErrorContains(t, err, "eval broker is enabled") + must.ErrorContains(t, err, "eval broker is enabled") // Pause the eval broker on the scheduler config, and try deleting the // evals again. 
schedulerConfig.PauseEvalBroker = true - require.NoError(t, testState.SchedulerSetConfig(30, &schedulerConfig)) + must.NoError(t, testState.SchedulerSetConfig(30, &schedulerConfig)) - require.NoError(t, testState.DeleteEval(40, mockEvalIDs, []string{}, true)) + must.NoError(t, testState.DeleteEval(40, mockEvalIDs, []string{}, true)) ws := memdb.NewWatchSet() mockEval1Lookup, err := testState.EvalByID(ws, mockEval1.ID) - require.NoError(t, err) - require.Nil(t, mockEval1Lookup) + must.NoError(t, err) + must.Nil(t, mockEval1Lookup) mockEval2Lookup, err := testState.EvalByID(ws, mockEval1.ID) - require.NoError(t, err) - require.Nil(t, mockEval2Lookup) + must.NoError(t, err) + must.Nil(t, mockEval2Lookup) } // TestStateStore_DeleteEvalsByFilter_Pagination tests the pagination logic for @@ -5219,7 +4577,7 @@ func TestStateStore_DeleteEvalsByFilter_Pagination(t *testing.T) { must.NoError(t, store.SchedulerSetConfig(index, schedulerConfig)) evals := []*structs.Evaluation{} - for i := 0; i < evalCount; i++ { + for range evalCount { mockEval := mock.Eval() evals = append(evals, mockEval) } @@ -5432,7 +4790,7 @@ func TestStateStore_EvalIsUserDeleteSafe(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { actualResult := isEvalDeleteSafe(tc.inputAllocs, tc.inputJob) - require.Equal(t, tc.expectedResult, actualResult) + must.Eq(t, tc.expectedResult, actualResult) }) } } @@ -5449,30 +4807,19 @@ func TestStateStore_EvalsByJob(t *testing.T) { evals := []*structs.Evaluation{eval1, eval2} err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, evals) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) err = state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval3}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) ws := memdb.NewWatchSet() out, err := state.EvalsByJob(ws, eval1.Namespace, eval1.JobID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) 
sort.Sort(EvalIDSort(evals)) sort.Sort(EvalIDSort(out)) - if !reflect.DeepEqual(evals, out) { - t.Fatalf("bad: %#v %#v", evals, out) - } - - if watchFired(ws) { - t.Fatalf("bad") - } + must.Eq(t, evals, out) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_Evals(t *testing.T) { @@ -5481,21 +4828,17 @@ func TestStateStore_Evals(t *testing.T) { state := testStateStore(t) var evals []*structs.Evaluation - for i := 0; i < 10; i++ { + for i := range 10 { eval := mock.Eval() evals = append(evals, eval) err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000+uint64(i), []*structs.Evaluation{eval}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) } ws := memdb.NewWatchSet() iter, err := state.Evals(ws, false) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) var out []*structs.Evaluation for { @@ -5509,13 +4852,8 @@ func TestStateStore_Evals(t *testing.T) { sort.Sort(EvalIDSort(evals)) sort.Sort(EvalIDSort(out)) - if !reflect.DeepEqual(evals, out) { - t.Fatalf("bad: %#v %#v", evals, out) - } - - if watchFired(ws) { - t.Fatalf("bad") - } + must.Eq(t, evals, out) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_EvalsByIDPrefix(t *testing.T) { @@ -5535,16 +4873,14 @@ func TestStateStore_EvalsByIDPrefix(t *testing.T) { "abbbbbbb-7bfb-395d-eb95-0685af2176b2", "bbbbbbbb-7bfb-395d-eb95-0685af2176b2", } - for i := 0; i < 9; i++ { + for i := range 9 { eval := mock.Eval() eval.ID = ids[i] evals = append(evals, eval) } err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, evals) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) gatherEvals := func(iter memdb.ResultIterator) []*structs.Evaluation { var evals []*structs.Evaluation @@ -5561,7 +4897,7 @@ func TestStateStore_EvalsByIDPrefix(t *testing.T) { t.Run("list by prefix", func(t *testing.T) { ws := memdb.NewWatchSet() iter, err := state.EvalsByIDPrefix(ws, 
structs.DefaultNamespace, "aaaa", SortDefault) - require.NoError(t, err) + must.NoError(t, err) got := []string{} for _, e := range gatherEvals(iter) { @@ -5575,24 +4911,24 @@ func TestStateStore_EvalsByIDPrefix(t *testing.T) { "aaaaabbb-7bfb-395d-eb95-0685af2176b2", "aaaabbbb-7bfb-395d-eb95-0685af2176b2", } - require.Len(t, got, 5, "expected five evaluations") - require.Equal(t, expected, got) // Must be in this order. + must.Len(t, 5, got, must.Sprint("expected five evaluations")) + must.Eq(t, expected, got) // Must be in this order. }) t.Run("invalid prefix", func(t *testing.T) { ws := memdb.NewWatchSet() iter, err := state.EvalsByIDPrefix(ws, structs.DefaultNamespace, "b-a7bfb", SortDefault) - require.NoError(t, err) + must.NoError(t, err) out := gatherEvals(iter) - require.Len(t, out, 0, "expected zero evaluations") - require.False(t, watchFired(ws)) + must.Len(t, 0, out, must.Sprint("expected zero evaluations")) + must.False(t, watchFired(ws)) }) t.Run("reverse order", func(t *testing.T) { ws := memdb.NewWatchSet() iter, err := state.EvalsByIDPrefix(ws, structs.DefaultNamespace, "aaaa", SortReverse) - require.NoError(t, err) + must.NoError(t, err) got := []string{} for _, e := range gatherEvals(iter) { @@ -5606,8 +4942,8 @@ func TestStateStore_EvalsByIDPrefix(t *testing.T) { "aaaaaaab-7bfb-395d-eb95-0685af2176b2", "aaaaaaaa-7bfb-395d-eb95-0685af2176b2", } - require.Len(t, got, 5, "expected five evaluations") - require.Equal(t, expected, got) // Must be in this order. + must.Len(t, 5, got, must.Sprint("expected five evaluations")) + must.Eq(t, expected, got) // Must be in this order. 
}) } @@ -5628,8 +4964,8 @@ func TestStateStore_EvalsByIDPrefix_Namespaces(t *testing.T) { eval1.Namespace = ns1.Name eval2.Namespace = ns2.Name - require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) - require.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1, eval2})) + must.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + must.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1, eval2})) gatherEvals := func(iter memdb.ResultIterator) []*structs.Evaluation { var evals []*structs.Evaluation @@ -5645,25 +4981,25 @@ func TestStateStore_EvalsByIDPrefix_Namespaces(t *testing.T) { ws := memdb.NewWatchSet() iter1, err := state.EvalsByIDPrefix(ws, ns1.Name, sharedPrefix, SortDefault) - require.NoError(t, err) + must.NoError(t, err) iter2, err := state.EvalsByIDPrefix(ws, ns2.Name, sharedPrefix, SortDefault) - require.NoError(t, err) + must.NoError(t, err) iter3, err := state.EvalsByIDPrefix(ws, structs.AllNamespacesSentinel, sharedPrefix, SortDefault) - require.NoError(t, err) + must.NoError(t, err) evalsNs1 := gatherEvals(iter1) evalsNs2 := gatherEvals(iter2) evalsNs3 := gatherEvals(iter3) - require.Len(t, evalsNs1, 1) - require.Len(t, evalsNs2, 1) - require.Len(t, evalsNs3, 2) + must.Len(t, 1, evalsNs1) + must.Len(t, 1, evalsNs2) + must.Len(t, 2, evalsNs3) iter1, err = state.EvalsByIDPrefix(ws, ns1.Name, eval1.ID[:8], SortDefault) - require.NoError(t, err) + must.NoError(t, err) evalsNs1 = gatherEvals(iter1) - require.Len(t, evalsNs1, 1) - require.False(t, watchFired(ws)) + must.Len(t, 1, evalsNs1) + must.False(t, watchFired(ws)) } func TestStateStore_EvalsRelatedToID(t *testing.T) { @@ -5708,7 +5044,7 @@ func TestStateStore_EvalsRelatedToID(t *testing.T) { e8.NextEval = uuid.Generate() err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{e1, e2, e3, e4, e5, e6, e7, e8}) - require.NoError(t, err) + must.NoError(t, err) 
testCases := []struct { name string @@ -5758,30 +5094,30 @@ func TestStateStore_EvalsRelatedToID(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ws := memdb.NewWatchSet() related, err := state.EvalsRelatedToID(ws, tc.id) - require.NoError(t, err) + must.NoError(t, err) got := []string{} for _, e := range related { got = append(got, e.ID) } - require.ElementsMatch(t, tc.expected, got) + must.SliceContainsAll(t, tc.expected, got) }) } t.Run("blocking query", func(t *testing.T) { ws := memdb.NewWatchSet() - _, err := state.EvalsRelatedToID(ws, e2.ID) - require.NoError(t, err) + _, err = state.EvalsRelatedToID(ws, e2.ID) + must.NoError(t, err) // Update an eval off the chain and make sure watchset doesn't fire. e7.Status = structs.EvalStatusComplete state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{e7}) - require.False(t, watchFired(ws)) + must.False(t, watchFired(ws)) // Update an eval in the chain and make sure watchset does fire. e3.Status = structs.EvalStatusComplete state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{e3}) - require.True(t, watchFired(ws)) + must.True(t, watchFired(ws)) }) } @@ -5876,7 +5212,7 @@ func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { // Create watchsets so we can test that update fires the watch watches := make([]memdb.WatchSet, 8) - for i := 0; i < 8; i++ { + for i := range 8 { watches[i] = memdb.NewWatchSet() } _, err := state.AllocByID(watches[0], alloc1.ID) @@ -6223,61 +5559,40 @@ func TestStateStore_UpsertAlloc_Alloc(t *testing.T) { state := testStateStore(t) alloc := mock.Alloc() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) // Create watchsets so we can test that update fires the watch watches := make([]memdb.WatchSet, 4) - for i := 0; i < 4; i++ { + for i := range 4 { watches[i] = memdb.NewWatchSet() } - if _, 
err := state.AllocByID(watches[0], alloc.ID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.AllocsByEval(watches[1], alloc.EvalID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.AllocsByJob(watches[2], alloc.Namespace, alloc.JobID, false); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.AllocsByNode(watches[3], alloc.NodeID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err := state.AllocByID(watches[0], alloc.ID) + must.NoError(t, err) + _, err = state.AllocsByEval(watches[1], alloc.EvalID) + must.NoError(t, err) + _, err = state.AllocsByJob(watches[2], alloc.Namespace, alloc.JobID, false) + must.NoError(t, err) + _, err = state.AllocsByNode(watches[3], alloc.NodeID) + must.NoError(t, err) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) - if err != nil { - t.Fatalf("err: %v", err) - } + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) + must.NoError(t, err) - for i, ws := range watches { - if !watchFired(ws) { - t.Fatalf("bad %d", i) - } + for _, ws := range watches { + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) } ws := memdb.NewWatchSet() out, err := state.AllocByID(ws, alloc.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - if !reflect.DeepEqual(alloc, out) { - t.Fatalf("bad: %#v %#v", alloc, out) - } + must.NoError(t, err) + must.Eq(t, alloc, out) index, err := state.Index("allocs") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1000 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1000, index) summary, err := state.JobSummaryByID(ws, alloc.Namespace, alloc.JobID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) tgSummary, ok := summary.Summary["web"] if !ok { @@ -6287,14 +5602,11 @@ func TestStateStore_UpsertAlloc_Alloc(t *testing.T) { t.Fatalf("expected queued: %v, actual: %v", 1, tgSummary.Starting) } - if watchFired(ws) { - 
t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_UpsertAlloc_Deployment(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) alloc := mock.Alloc() @@ -6306,42 +5618,38 @@ func TestStateStore_UpsertAlloc_Deployment(t *testing.T) { deployment.TaskGroups[alloc.TaskGroup].ProgressDeadline = pdeadline alloc.DeploymentID = deployment.ID - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) - require.Nil(state.UpsertDeployment(1000, deployment)) + must.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) + must.Nil(t, state.UpsertDeployment(1000, deployment)) // Create a watch set so we can test that update fires the watch ws := memdb.NewWatchSet() - require.Nil(state.AllocsByDeployment(ws, alloc.DeploymentID)) + _, err := state.AllocsByDeployment(ws, alloc.DeploymentID) + must.NoError(t, err) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) - require.Nil(err) - - if !watchFired(ws) { - t.Fatalf("watch not fired") - } + err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) + must.NoError(t, err) + must.True(t, watchFired(ws), must.Sprint("watch not fired")) ws = memdb.NewWatchSet() allocs, err := state.AllocsByDeployment(ws, alloc.DeploymentID) - require.Nil(err) - require.Len(allocs, 1) - require.EqualValues(alloc, allocs[0]) + must.NoError(t, err) + must.Len(t, 1, allocs) + must.Eq(t, alloc, allocs[0]) index, err := state.Index("allocs") - require.Nil(err) - require.EqualValues(1001, index) - if watchFired(ws) { - t.Fatalf("bad") - } + must.NoError(t, err) + must.Eq(t, 1001, index) + must.False(t, watchFired(ws), must.Sprint("watch was fired")) // Check that the deployment state was updated dout, err := state.DeploymentByID(nil, deployment.ID) - require.Nil(err) - require.NotNil(dout) - require.Len(dout.TaskGroups, 1) + must.NoError(t, err) + 
must.NotNil(t, dout) + must.MapLen(t, 1, dout.TaskGroups) dstate := dout.TaskGroups[alloc.TaskGroup] - require.NotNil(dstate) - require.Equal(1, dstate.PlacedAllocs) - require.True(now.Add(pdeadline).Equal(dstate.RequireProgressBy)) + must.NotNil(t, dstate) + must.Eq(t, 1, dstate.PlacedAllocs) + must.True(t, now.Add(pdeadline).Equal(dstate.RequireProgressBy)) } func TestStateStore_UpsertAlloc_AllocsByNamespace(t *testing.T) { @@ -6367,26 +5675,26 @@ func TestStateStore_UpsertAlloc_AllocsByNamespace(t *testing.T) { alloc4.Namespace = ns2.Name alloc4.Job.Namespace = ns2.Name - require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc1.Job)) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, alloc3.Job)) + must.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc1.Job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, alloc3.Job)) // Create watchsets so we can test that update fires the watch watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()} _, err := state.AllocsByNamespace(watches[0], ns1.Name) - require.NoError(t, err) + must.NoError(t, err) _, err = state.AllocsByNamespace(watches[1], ns2.Name) - require.NoError(t, err) + must.NoError(t, err) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1, alloc2, alloc3, alloc4})) - require.True(t, watchFired(watches[0])) - require.True(t, watchFired(watches[1])) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1, alloc2, alloc3, alloc4})) + must.True(t, watchFired(watches[0])) + must.True(t, watchFired(watches[1])) ws := memdb.NewWatchSet() iter1, err := state.AllocsByNamespace(ws, ns1.Name) - require.NoError(t, err) + must.NoError(t, err) iter2, err := 
state.AllocsByNamespace(ws, ns2.Name) - require.NoError(t, err) + must.NoError(t, err) var out1 []*structs.Allocation for { @@ -6406,20 +5714,20 @@ func TestStateStore_UpsertAlloc_AllocsByNamespace(t *testing.T) { out2 = append(out2, raw.(*structs.Allocation)) } - require.Len(t, out1, 2) - require.Len(t, out2, 2) + must.Len(t, 2, out1) + must.Len(t, 2, out2) for _, alloc := range out1 { - require.Equal(t, ns1.Name, alloc.Namespace) + must.Eq(t, ns1.Name, alloc.Namespace) } for _, alloc := range out2 { - require.Equal(t, ns2.Name, alloc.Namespace) + must.Eq(t, ns2.Name, alloc.Namespace) } index, err := state.Index("allocs") - require.NoError(t, err) - require.EqualValues(t, 1001, index) - require.False(t, watchFired(ws)) + must.NoError(t, err) + must.Eq(t, 1001, index) + must.False(t, watchFired(ws)) } // Testing to ensure we keep issue @@ -6511,7 +5819,7 @@ func TestStateStore_UpsertAlloc_StickyVolumes(t *testing.T) { JobID: stickyJob.ID, TaskGroupName: stickyJob.TaskGroups[0].Name, }) - must.Nil(t, err) + must.NoError(t, err) for raw := iter.Next(); raw != nil; raw = iter.Next() { claim := raw.(*structs.TaskGroupHostVolumeClaim) claims = append(claims, claim) @@ -6521,7 +5829,7 @@ func TestStateStore_UpsertAlloc_StickyVolumes(t *testing.T) { // clean up the state txn := store.db.WriteTxn(1000) _, err = txn.DeletePrefix(TableTaskGroupHostVolumeClaim, "id_prefix", stickyJob.ID) - must.Nil(t, err) + must.NoError(t, err) must.NoError(t, store.deleteAllocsForJobTxn(txn, 1000, structs.DefaultNamespace, stickyJob.ID)) must.NoError(t, txn.Commit()) @@ -6550,13 +5858,13 @@ func TestStateStore_UpsertAlloc_ChildJob(t *testing.T) { state := testStateStore(t) parent := mock.Job() - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent)) child := mock.Job() child.Status = "" child.ParentID = parent.ID - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, 
child)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child)) alloc := mock.Alloc() alloc.JobID = child.ID @@ -6565,26 +5873,26 @@ func TestStateStore_UpsertAlloc_ChildJob(t *testing.T) { // Create watchsets so we can test that delete fires the watch ws := memdb.NewWatchSet() _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) - require.NoError(t, err) + must.NoError(t, err) err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) - require.NoError(t, err) + must.NoError(t, err) - require.True(t, watchFired(ws)) + must.True(t, watchFired(ws)) ws = memdb.NewWatchSet() summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) - require.NoError(t, err) - require.NotNil(t, summary) + must.NoError(t, err) + must.NotNil(t, summary) - require.Equal(t, parent.ID, summary.JobID) - require.NotNil(t, summary.Children) + must.Eq(t, parent.ID, summary.JobID) + must.NotNil(t, summary.Children) - require.Equal(t, int64(0), summary.Children.Pending) - require.Equal(t, int64(1), summary.Children.Running) - require.Equal(t, int64(0), summary.Children.Dead) + must.Eq(t, int64(0), summary.Children.Pending) + must.Eq(t, int64(1), summary.Children.Running) + must.Eq(t, int64(0), summary.Children.Dead) - require.False(t, watchFired(ws)) + must.False(t, watchFired(ws)) } func TestStateStore_UpsertAlloc_NextAllocation(t *testing.T) { @@ -6618,20 +5926,14 @@ func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { state := testStateStore(t) alloc := mock.Alloc() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) ws := memdb.NewWatchSet() summary, err := state.JobSummaryByID(ws, alloc.Namespace, alloc.JobID) - if 
err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) tgSummary := summary.Summary["web"] if tgSummary.Starting != 1 { t.Fatalf("expected starting: %v, actual: %v", 1, tgSummary.Starting) @@ -6644,71 +5946,47 @@ func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { // Create watchsets so we can test that update fires the watch watches := make([]memdb.WatchSet, 4) - for i := 0; i < 4; i++ { + for i := range 4 { watches[i] = memdb.NewWatchSet() } - if _, err := state.AllocByID(watches[0], alloc2.ID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.AllocsByEval(watches[1], alloc2.EvalID); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.AllocsByJob(watches[2], alloc2.Namespace, alloc2.JobID, false); err != nil { - t.Fatalf("bad: %v", err) - } - if _, err := state.AllocsByNode(watches[3], alloc2.NodeID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err = state.AllocByID(watches[0], alloc2.ID) + must.NoError(t, err) + _, err = state.AllocsByEval(watches[1], alloc2.EvalID) + must.NoError(t, err) + _, err = state.AllocsByJob(watches[2], alloc2.Namespace, alloc2.JobID, false) + must.NoError(t, err) + _, err = state.AllocsByNode(watches[3], alloc2.NodeID) + must.NoError(t, err) err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc2}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - for i, ws := range watches { - if !watchFired(ws) { - t.Fatalf("bad %d", i) - } + for _, ws := range watches { + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) } ws = memdb.NewWatchSet() out, err := state.AllocByID(ws, alloc.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if !reflect.DeepEqual(alloc2, out) { - t.Fatalf("bad: %#v %#v", alloc2, out) - } + must.Eq(t, alloc2, out) - if out.CreateIndex != 1000 { - t.Fatalf("bad: %#v", out) - } - if out.ModifyIndex != 1002 { - t.Fatalf("bad: %#v", out) - } + must.Eq(t, 1000, out.CreateIndex) + 
must.Eq(t, 1002, out.ModifyIndex) index, err := state.Index("allocs") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1002 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1002, index) // Ensure that summary hasb't changed summary, err = state.JobSummaryByID(ws, alloc.Namespace, alloc.JobID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) tgSummary = summary.Summary["web"] if tgSummary.Starting != 1 { t.Fatalf("expected starting: %v, actual: %v", 1, tgSummary.Starting) } - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } // This test ensures that the state store will mark the clients status as lost @@ -6720,27 +5998,19 @@ func TestStateStore_UpdateAlloc_Lost(t *testing.T) { alloc := mock.Alloc() alloc.ClientStatus = "foo" - if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) alloc2 := new(structs.Allocation) *alloc2 = *alloc alloc2.ClientStatus = structs.AllocClientStatusLost - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc2}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc2})) ws := memdb.NewWatchSet() out, err := state.AllocByID(ws, alloc2.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) if out.ClientStatus != structs.AllocClientStatusLost { t.Fatalf("bad: %#v", out) @@ -6758,51 +6028,38 @@ func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { // Upsert a job state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID)) - if err := state.UpsertJob(structs.MsgTypeTestSetup, 
999, nil, alloc.Job); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if err := state.DeleteJob(1001, alloc.Namespace, alloc.JobID); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.DeleteJob(1001, alloc.Namespace, alloc.JobID)) // Update the desired state of the allocation to stop allocCopy := alloc.Copy() allocCopy.DesiredStatus = structs.AllocDesiredStatusStop - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{allocCopy}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{allocCopy})) // Update the client state of the allocation to complete allocCopy1 := allocCopy.Copy() allocCopy1.ClientStatus = structs.AllocClientStatusComplete - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{allocCopy1}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{allocCopy1})) ws := memdb.NewWatchSet() out, _ := state.AllocByID(ws, alloc.ID) // Update the modify index of the alloc before comparing allocCopy1.ModifyIndex = 1003 - if !reflect.DeepEqual(out, allocCopy1) { - t.Fatalf("expected: %#v \n actual: %#v", allocCopy1, out) - } + must.Eq(t, out, allocCopy1) } func TestStateStore_UpdateAllocDesiredTransition(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) alloc := mock.Alloc() - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) + must.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) + 
must.Nil(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) t1 := &structs.DesiredTransition{ Migrate: pointer.Of(true), @@ -6823,43 +6080,43 @@ func TestStateStore_UpdateAllocDesiredTransition(t *testing.T) { evals := []*structs.Evaluation{eval} m := map[string]*structs.DesiredTransition{alloc.ID: t1} - require.Nil(state.UpdateAllocsDesiredTransitions(structs.MsgTypeTestSetup, 1001, m, evals)) + must.Nil(t, state.UpdateAllocsDesiredTransitions(structs.MsgTypeTestSetup, 1001, m, evals)) ws := memdb.NewWatchSet() out, err := state.AllocByID(ws, alloc.ID) - require.Nil(err) - require.NotNil(out.DesiredTransition.Migrate) - require.True(*out.DesiredTransition.Migrate) - require.EqualValues(1000, out.CreateIndex) - require.EqualValues(1001, out.ModifyIndex) + must.NoError(t, err) + must.NotNil(t, out.DesiredTransition.Migrate) + must.True(t, *out.DesiredTransition.Migrate) + must.Eq(t, 1000, out.CreateIndex) + must.Eq(t, 1001, out.ModifyIndex) index, err := state.Index("allocs") - require.Nil(err) - require.EqualValues(1001, index) + must.NoError(t, err) + must.Eq(t, 1001, index) // Check the eval is created eout, err := state.EvalByID(nil, eval.ID) - require.Nil(err) - require.NotNil(eout) + must.NoError(t, err) + must.NotNil(t, eout) m = map[string]*structs.DesiredTransition{alloc.ID: t2} - require.Nil(state.UpdateAllocsDesiredTransitions(structs.MsgTypeTestSetup, 1002, m, evals)) + must.Nil(t, state.UpdateAllocsDesiredTransitions(structs.MsgTypeTestSetup, 1002, m, evals)) ws = memdb.NewWatchSet() out, err = state.AllocByID(ws, alloc.ID) - require.Nil(err) - require.NotNil(out.DesiredTransition.Migrate) - require.False(*out.DesiredTransition.Migrate) - require.EqualValues(1000, out.CreateIndex) - require.EqualValues(1002, out.ModifyIndex) + must.NoError(t, err) + must.NotNil(t, out.DesiredTransition.Migrate) + must.False(t, *out.DesiredTransition.Migrate) + must.Eq(t, 1000, out.CreateIndex) + must.Eq(t, 1002, out.ModifyIndex) index, 
err = state.Index("allocs") - require.Nil(err) - require.EqualValues(1002, index) + must.NoError(t, err) + must.Eq(t, 1002, index) // Try with a bogus alloc id m = map[string]*structs.DesiredTransition{uuid.Generate(): t2} - require.Nil(state.UpdateAllocsDesiredTransitions(structs.MsgTypeTestSetup, 1003, m, evals)) + must.Nil(t, state.UpdateAllocsDesiredTransitions(structs.MsgTypeTestSetup, 1003, m, evals)) } func TestStateStore_JobSummary(t *testing.T) { @@ -6911,9 +6168,7 @@ func TestStateStore_JobSummary(t *testing.T) { alloc5.DesiredStatus = structs.AllocDesiredStatusRun state.UpsertAllocs(structs.MsgTypeTestSetup, 970, []*structs.Allocation{alloc5}) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) expectedSummary := structs.JobSummary{ JobID: job.ID, @@ -6929,9 +6184,7 @@ func TestStateStore_JobSummary(t *testing.T) { } summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID) - if !reflect.DeepEqual(&expectedSummary, summary) { - t.Fatalf("expected: %#v, actual: %v", expectedSummary, summary) - } + must.Eq(t, &expectedSummary, summary) // De-register the job. 
state.DeleteJob(980, job.Namespace, job.ID) @@ -6981,9 +6234,7 @@ func TestStateStore_JobSummary(t *testing.T) { } summary, _ = state.JobSummaryByID(ws, job1.Namespace, job1.ID) - if !reflect.DeepEqual(&expectedSummary, summary) { - t.Fatalf("expected: %#v, actual: %#v", expectedSummary, summary) - } + must.Eq(t, &expectedSummary, summary) } func TestStateStore_ReconcileJobSummary(t *testing.T) { @@ -7079,14 +6330,11 @@ func TestStateStore_ReconcileJobSummary(t *testing.T) { CreateIndex: 100, ModifyIndex: 120, } - if !reflect.DeepEqual(&expectedSummary, summary) { - t.Fatalf("expected: %v, actual: %v", expectedSummary, summary) - } + must.Eq(t, &expectedSummary, summary) } func TestStateStore_ReconcileParentJobSummary(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) @@ -7123,12 +6371,12 @@ func TestStateStore_ReconcileParentJobSummary(t *testing.T) { alloc2.JobID = childJob.ID alloc2.ClientStatus = structs.AllocClientStatusFailed - require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 110, nil, childJob)) - require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 111, []*structs.Allocation{alloc, alloc2})) + must.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 110, nil, childJob)) + must.Nil(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 111, []*structs.Allocation{alloc, alloc2})) // Make the summary incorrect in the state store summary, err := state.JobSummaryByID(nil, job1.Namespace, job1.ID) - require.Nil(err) + must.NoError(t, err) summary.Children = nil summary.Summary = make(map[string]structs.TaskGroupSummary) @@ -7155,7 +6403,7 @@ func TestStateStore_ReconcileParentJobSummary(t *testing.T) { CreateIndex: 100, ModifyIndex: 120, } - require.Equal(&expectedSummary, summary) + must.Eq(t, &expectedSummary, summary) // Verify child job summary is also correct childSummary, _ := state.JobSummaryByID(ws, childJob.Namespace, childJob.ID) @@ -7171,7 +6419,7 @@ func TestStateStore_ReconcileParentJobSummary(t *testing.T) { 
CreateIndex: 110, ModifyIndex: 120, } - require.Equal(&expectedChildSummary, childSummary) + must.Eq(t, &expectedChildSummary, childSummary) } func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { @@ -7191,9 +6439,7 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { alloc1.ClientStatus = structs.AllocClientStatusRunning // Updating allocation should not throw any error - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 400, []*structs.Allocation{alloc1}); err != nil { - t.Fatalf("expect err: %v", err) - } + must.NoError(t, state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 400, []*structs.Allocation{alloc1})) // Re-Register the job state.UpsertJob(structs.MsgTypeTestSetup, 500, nil, alloc.Job) @@ -7201,9 +6447,7 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { // Update the alloc again alloc2 := alloc.Copy() alloc2.ClientStatus = structs.AllocClientStatusComplete - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 400, []*structs.Allocation{alloc1}); err != nil { - t.Fatalf("expect err: %v", err) - } + must.NoError(t, state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 400, []*structs.Allocation{alloc1})) // Job Summary of the newly registered job shouldn't account for the // allocation update for the older job @@ -7220,9 +6464,7 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { ws := memdb.NewWatchSet() summary, _ := state.JobSummaryByID(ws, alloc.Namespace, alloc.Job.ID) - if !reflect.DeepEqual(&expectedSummary, summary) { - t.Fatalf("expected: %v, actual: %v", expectedSummary, summary) - } + must.Eq(t, &expectedSummary, summary) } func TestStateStore_EvictAlloc_Alloc(t *testing.T) { @@ -7233,35 +6475,24 @@ func TestStateStore_EvictAlloc_Alloc(t *testing.T) { state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)) err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) 
alloc2 := new(structs.Allocation) *alloc2 = *alloc alloc2.DesiredStatus = structs.AllocDesiredStatusEvict err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc2}) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) ws := memdb.NewWatchSet() out, err := state.AllocByID(ws, alloc.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if out.DesiredStatus != structs.AllocDesiredStatusEvict { - t.Fatalf("bad: %#v %#v", alloc, out) - } + must.Eq(t, structs.AllocDesiredStatusEvict, out.DesiredStatus, + must.Sprintf("bad: %#v %#v", alloc, out)) index, err := state.Index("allocs") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1001 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1001, index) } func TestStateStore_AllocsByNode(t *testing.T) { @@ -7270,7 +6501,7 @@ func TestStateStore_AllocsByNode(t *testing.T) { state := testStateStore(t) var allocs []*structs.Allocation - for i := 0; i < 10; i++ { + for range 10 { alloc := mock.Alloc() alloc.NodeID = "foo" allocs = append(allocs, alloc) @@ -7281,26 +6512,17 @@ func TestStateStore_AllocsByNode(t *testing.T) { } err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) ws := memdb.NewWatchSet() out, err := state.AllocsByNode(ws, "foo") - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) sort.Sort(AllocIDSort(allocs)) sort.Sort(AllocIDSort(out)) - if !reflect.DeepEqual(allocs, out) { - t.Fatalf("bad: %#v %#v", allocs, out) - } - - if watchFired(ws) { - t.Fatalf("bad") - } + must.Eq(t, allocs, out) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_AllocsByNodeTerminal(t *testing.T) { @@ -7309,7 +6531,7 @@ func TestStateStore_AllocsByNodeTerminal(t *testing.T) { state := testStateStore(t) var allocs, term, nonterm []*structs.Allocation - for i := 0; i < 10; i++ { + for i 
:= range 10 { alloc := mock.Alloc() alloc.NodeID = "foo" if i%2 == 0 { @@ -7326,40 +6548,27 @@ func TestStateStore_AllocsByNodeTerminal(t *testing.T) { } err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) // Verify the terminal allocs ws := memdb.NewWatchSet() out, err := state.AllocsByNodeTerminal(ws, "foo", true) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) sort.Sort(AllocIDSort(term)) sort.Sort(AllocIDSort(out)) - if !reflect.DeepEqual(term, out) { - t.Fatalf("bad: %#v %#v", term, out) - } + must.Eq(t, term, out) // Verify the non-terminal allocs out, err = state.AllocsByNodeTerminal(ws, "foo", false) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) sort.Sort(AllocIDSort(nonterm)) sort.Sort(AllocIDSort(out)) - if !reflect.DeepEqual(nonterm, out) { - t.Fatalf("bad: %#v %#v", nonterm, out) - } - - if watchFired(ws) { - t.Fatalf("bad") - } + must.Eq(t, nonterm, out) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_AllocsByJob(t *testing.T) { @@ -7368,7 +6577,7 @@ func TestStateStore_AllocsByJob(t *testing.T) { state := testStateStore(t) var allocs []*structs.Allocation - for i := 0; i < 10; i++ { + for range 10 { alloc := mock.Alloc() alloc.JobID = "foo" allocs = append(allocs, alloc) @@ -7379,26 +6588,17 @@ func TestStateStore_AllocsByJob(t *testing.T) { } err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) ws := memdb.NewWatchSet() out, err := state.AllocsByJob(ws, mock.Alloc().Namespace, "foo", false) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) sort.Sort(AllocIDSort(allocs)) sort.Sort(AllocIDSort(out)) - if !reflect.DeepEqual(allocs, out) { - t.Fatalf("bad: %#v %#v", allocs, out) - } - - if watchFired(ws) { - t.Fatalf("bad") - } + must.Eq(t, allocs, out) + 
must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_AllocsForRegisteredJob(t *testing.T) { @@ -7411,40 +6611,32 @@ func TestStateStore_AllocsForRegisteredJob(t *testing.T) { job := mock.Job() job.ID = "foo" state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job) - for i := 0; i < 3; i++ { + for range 3 { alloc := mock.Alloc() alloc.Job = job alloc.JobID = job.ID allocs = append(allocs, alloc) } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, allocs); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 200, allocs)) - if err := state.DeleteJob(250, job.Namespace, job.ID); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.DeleteJob(250, job.Namespace, job.ID)) job1 := mock.Job() job1.ID = "foo" job1.CreateIndex = 50 state.UpsertJob(structs.MsgTypeTestSetup, 300, nil, job1) - for i := 0; i < 4; i++ { + for range 4 { alloc := mock.Alloc() alloc.Job = job1 alloc.JobID = job1.ID allocs1 = append(allocs1, alloc) } - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs1); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs1)) ws := memdb.NewWatchSet() out, err := state.AllocsByJob(ws, job1.Namespace, job1.ID, true) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) expected := len(allocs1) // state.DeleteJob corresponds to stop -purge, so all allocs from the original job should be gone if len(out) != expected { @@ -7452,18 +6644,14 @@ func TestStateStore_AllocsForRegisteredJob(t *testing.T) { } out1, err := state.AllocsByJob(ws, job1.Namespace, job1.ID, false) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) expected = len(allocs1) if len(out1) != expected { t.Fatalf("expected: %v, actual: %v", expected, len(out1)) } - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should 
not have fired")) } func TestStateStore_AllocsByIDPrefix(t *testing.T) { @@ -7483,7 +6671,7 @@ func TestStateStore_AllocsByIDPrefix(t *testing.T) { "abbbbbbb-7bfb-395d-eb95-0685af2176b2", "bbbbbbbb-7bfb-395d-eb95-0685af2176b2", } - for i := 0; i < 9; i++ { + for i := range 9 { alloc := mock.Alloc() alloc.ID = ids[i] allocs = append(allocs, alloc) @@ -7494,7 +6682,7 @@ func TestStateStore_AllocsByIDPrefix(t *testing.T) { } err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) - require.NoError(t, err) + must.NoError(t, err) gatherAllocs := func(iter memdb.ResultIterator) []*structs.Allocation { var allocs []*structs.Allocation @@ -7511,10 +6699,10 @@ func TestStateStore_AllocsByIDPrefix(t *testing.T) { t.Run("allocs by prefix", func(t *testing.T) { ws := memdb.NewWatchSet() iter, err := state.AllocsByIDPrefix(ws, structs.DefaultNamespace, "aaaa", SortDefault) - require.NoError(t, err) + must.NoError(t, err) out := gatherAllocs(iter) - require.Len(t, out, 5, "expected five allocations") + must.Len(t, 5, out, must.Sprint("expected five allocations")) got := []string{} for _, a := range out { @@ -7527,27 +6715,27 @@ func TestStateStore_AllocsByIDPrefix(t *testing.T) { "aaaaabbb-7bfb-395d-eb95-0685af2176b2", "aaaabbbb-7bfb-395d-eb95-0685af2176b2", } - require.Equal(t, expected, got) - require.False(t, watchFired(ws)) + must.Eq(t, expected, got) + must.False(t, watchFired(ws)) }) t.Run("invalid prefix", func(t *testing.T) { ws := memdb.NewWatchSet() iter, err := state.AllocsByIDPrefix(ws, structs.DefaultNamespace, "b-a7bfb", SortDefault) - require.NoError(t, err) + must.NoError(t, err) out := gatherAllocs(iter) - require.Len(t, out, 0) - require.False(t, watchFired(ws)) + must.Len(t, 0, out) + must.False(t, watchFired(ws)) }) t.Run("reverse", func(t *testing.T) { ws := memdb.NewWatchSet() iter, err := state.AllocsByIDPrefix(ws, structs.DefaultNamespace, "aaaa", SortReverse) - require.NoError(t, err) + must.NoError(t, err) out := gatherAllocs(iter) - 
require.Len(t, out, 5, "expected five allocations") + must.Len(t, 5, out, must.Sprint("expected five allocations")) got := []string{} for _, a := range out { @@ -7560,8 +6748,8 @@ func TestStateStore_AllocsByIDPrefix(t *testing.T) { "aaaaaaab-7bfb-395d-eb95-0685af2176b2", "aaaaaaaa-7bfb-395d-eb95-0685af2176b2", } - require.Equal(t, expected, got) - require.False(t, watchFired(ws)) + must.Eq(t, expected, got) + must.False(t, watchFired(ws)) }) } @@ -7583,8 +6771,8 @@ func TestStateStore_AllocsByIDPrefix_Namespaces(t *testing.T) { alloc1.Namespace = ns1.Name alloc2.Namespace = ns2.Name - require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) - require.NoError(t, state.UpsertAllocs( + must.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + must.NoError(t, state.UpsertAllocs( structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) gatherAllocs := func(iter memdb.ResultIterator) []*structs.Allocation { @@ -7602,21 +6790,21 @@ func TestStateStore_AllocsByIDPrefix_Namespaces(t *testing.T) { ws := memdb.NewWatchSet() iter1, err := state.AllocsByIDPrefix(ws, ns1.Name, sharedPrefix, SortDefault) - require.NoError(t, err) + must.NoError(t, err) iter2, err := state.AllocsByIDPrefix(ws, ns2.Name, sharedPrefix, SortDefault) - require.NoError(t, err) + must.NoError(t, err) allocsNs1 := gatherAllocs(iter1) allocsNs2 := gatherAllocs(iter2) - require.Len(t, allocsNs1, 1) - require.Len(t, allocsNs2, 1) + must.Len(t, 1, allocsNs1) + must.Len(t, 1, allocsNs2) iter1, err = state.AllocsByIDPrefix(ws, ns1.Name, alloc1.ID[:8], SortDefault) - require.NoError(t, err) + must.NoError(t, err) allocsNs1 = gatherAllocs(iter1) - require.Len(t, allocsNs1, 1) - require.False(t, watchFired(ws)) + must.Len(t, 1, allocsNs1) + must.False(t, watchFired(ws)) } func TestStateStore_Allocs(t *testing.T) { @@ -7625,7 +6813,7 @@ func TestStateStore_Allocs(t *testing.T) { state := testStateStore(t) var allocs []*structs.Allocation - for i := 0; i < 
10; i++ { + for range 10 { alloc := mock.Alloc() allocs = append(allocs, alloc) } @@ -7634,15 +6822,11 @@ func TestStateStore_Allocs(t *testing.T) { } err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) ws := memdb.NewWatchSet() iter, err := state.Allocs(ws, SortDefault) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) var out []*structs.Allocation for { @@ -7656,13 +6840,8 @@ func TestStateStore_Allocs(t *testing.T) { sort.Sort(AllocIDSort(allocs)) sort.Sort(AllocIDSort(out)) - if !reflect.DeepEqual(allocs, out) { - t.Fatalf("bad: %#v %#v", allocs, out) - } - - if watchFired(ws) { - t.Fatalf("bad") - } + must.Eq(t, allocs, out) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_Allocs_PrevAlloc(t *testing.T) { @@ -7671,8 +6850,7 @@ func TestStateStore_Allocs_PrevAlloc(t *testing.T) { state := testStateStore(t) var allocs []*structs.Allocation - require := require.New(t) - for i := 0; i < 5; i++ { + for range 5 { alloc := mock.Alloc() allocs = append(allocs, alloc) } @@ -7684,11 +6862,11 @@ func TestStateStore_Allocs_PrevAlloc(t *testing.T) { allocs[2].PreviousAllocation = allocs[1].ID err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) - require.Nil(err) + must.NoError(t, err) ws := memdb.NewWatchSet() iter, err := state.Allocs(ws, SortDefault) - require.Nil(err) + must.NoError(t, err) var out []*structs.Allocation for { @@ -7706,17 +6884,17 @@ func TestStateStore_Allocs_PrevAlloc(t *testing.T) { sort.Sort(AllocIDSort(allocs)) sort.Sort(AllocIDSort(out)) - require.Equal(allocs, out) - require.False(watchFired(ws)) + must.Eq(t, allocs, out) + must.False(t, watchFired(ws)) // Insert another alloc, verify index of previous alloc also got updated alloc := mock.Alloc() alloc.PreviousAllocation = allocs[0].ID err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}) - 
require.Nil(err) + must.NoError(t, err) alloc0, err := state.AllocByID(nil, allocs[0].ID) - require.Nil(err) - require.Equal(alloc0.ModifyIndex, uint64(1001)) + must.NoError(t, err) + must.Eq(t, alloc0.ModifyIndex, uint64(1001)) } func TestStateStore_SetJobStatus_ForceStatus(t *testing.T) { @@ -7730,29 +6908,18 @@ func TestStateStore_SetJobStatus_ForceStatus(t *testing.T) { job := mock.Job() job.Status = "" job.ModifyIndex = index - if err := txn.Insert("jobs", job); err != nil { - t.Fatalf("job insert failed: %v", err) - } + must.NoError(t, txn.Insert("jobs", job)) exp := "foobar" index = uint64(1000) - if err := state.setJobStatus(index, txn, job, false, exp); err != nil { - t.Fatalf("setJobStatus() failed: %v", err) - } + must.NoError(t, state.setJobStatus(index, txn, job, false, exp)) i, err := txn.First("jobs", "id", job.Namespace, job.ID) - if err != nil { - t.Fatalf("job lookup failed: %v", err) - } + must.NoError(t, err) updated := i.(*structs.Job) - if updated.Status != exp { - t.Fatalf("setJobStatus() set %v; expected %v", updated.Status, exp) - } - - if updated.ModifyIndex != index { - t.Fatalf("setJobStatus() set %d; expected %d", updated.ModifyIndex, index) - } + must.Eq(t, exp, updated.Status) + must.Eq(t, index, updated.ModifyIndex) } func TestStateStore_SetJobStatus_NoOp(t *testing.T) { @@ -7766,24 +6933,15 @@ func TestStateStore_SetJobStatus_NoOp(t *testing.T) { job := mock.Job() job.Status = structs.JobStatusPending job.ModifyIndex = 10 - if err := txn.Insert("jobs", job); err != nil { - t.Fatalf("job insert failed: %v", err) - } + must.NoError(t, txn.Insert("jobs", job)) index = uint64(1000) - if err := state.setJobStatus(index, txn, job, false, ""); err != nil { - t.Fatalf("setJobStatus() failed: %v", err) - } + must.NoError(t, state.setJobStatus(index, txn, job, false, "")) i, err := txn.First("jobs", "id", job.Namespace, job.ID) - if err != nil { - t.Fatalf("job lookup failed: %v", err) - } + must.NoError(t, err) updated := i.(*structs.Job) - 
- if updated.ModifyIndex == index { - t.Fatalf("setJobStatus() should have been a no-op") - } + must.Eq(t, 10, updated.ModifyIndex, must.Sprint("should be no-op")) } func TestStateStore_SetJobStatus(t *testing.T) { @@ -7797,28 +6955,16 @@ func TestStateStore_SetJobStatus(t *testing.T) { job := mock.Job() job.Status = "foobar" job.ModifyIndex = 10 - if err := txn.Insert("jobs", job); err != nil { - t.Fatalf("job insert failed: %v", err) - } + must.NoError(t, txn.Insert("jobs", job)) index := uint64(1000) - if err := state.setJobStatus(index, txn, job, false, ""); err != nil { - t.Fatalf("setJobStatus() failed: %v", err) - } + must.NoError(t, state.setJobStatus(index, txn, job, false, "")) i, err := txn.First("jobs", "id", job.Namespace, job.ID) - if err != nil { - t.Fatalf("job lookup failed: %v", err) - } + must.NoError(t, err) updated := i.(*structs.Job) - - if updated.Status != structs.JobStatusPending { - t.Fatalf("setJobStatus() set %v; expected %v", updated.Status, structs.JobStatusPending) - } - - if updated.ModifyIndex != index { - t.Fatalf("setJobStatus() set %d; expected %d", updated.ModifyIndex, index) - } + must.Eq(t, structs.JobStatusPending, updated.Status) + must.Eq(t, index, updated.ModifyIndex) } func TestStateStore_GetJobStatus(t *testing.T) { @@ -8064,21 +7210,14 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { // Create watchsets so we can test that upsert fires the watch ws := memdb.NewWatchSet() - if _, err := state.JobSummaryByID(ws, job.Namespace, job.ID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err := state.JobSummaryByID(ws, job.Namespace, job.ID) + must.NoError(t, err) - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, 
state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() summary, _ := state.JobSummaryByID(ws, job.Namespace, job.ID) @@ -8094,15 +7233,12 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { CreateIndex: 1000, ModifyIndex: 1001, } - if !reflect.DeepEqual(summary, &expectedSummary) { - t.Fatalf("expected: %v, actual: %v", expectedSummary, summary) - } + must.Eq(t, summary, &expectedSummary) // Create watchsets so we can test that upsert fires the watch ws2 := memdb.NewWatchSet() - if _, err := state.JobSummaryByID(ws2, job.Namespace, job.ID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err = state.JobSummaryByID(ws2, job.Namespace, job.ID) + must.NoError(t, err) alloc2 := mock.Alloc() alloc2.Job = job @@ -8112,13 +7248,9 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { alloc3.Job = job alloc3.JobID = job.ID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc2, alloc3}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc2, alloc3})) - if !watchFired(ws2) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws2)) outA, _ := state.AllocByID(ws, alloc3.ID) @@ -8135,15 +7267,12 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { CreateIndex: job.CreateIndex, ModifyIndex: outA.ModifyIndex, } - if !reflect.DeepEqual(summary, &expectedSummary) { - t.Fatalf("expected summary: %v, actual: %v", expectedSummary, summary) - } + must.Eq(t, summary, &expectedSummary) // Create watchsets so we can test that upsert fires the watch ws3 := memdb.NewWatchSet() - if _, err := state.JobSummaryByID(ws3, job.Namespace, job.ID); err != nil { - t.Fatalf("bad: %v", err) - } + _, err = state.JobSummaryByID(ws3, job.Namespace, job.ID) + must.NoError(t, err) alloc4 := mock.Alloc() 
alloc4.ID = alloc2.ID @@ -8157,13 +7286,9 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { alloc5.JobID = alloc3.JobID alloc5.ClientStatus = structs.AllocClientStatusComplete - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1004, []*structs.Allocation{alloc4, alloc5}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1004, []*structs.Allocation{alloc4, alloc5})) - if !watchFired(ws2) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws2)) outA, _ = state.AllocByID(ws, alloc5.ID) summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID) @@ -8180,9 +7305,7 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { CreateIndex: job.CreateIndex, ModifyIndex: outA.ModifyIndex, } - if !reflect.DeepEqual(summary, &expectedSummary) { - t.Fatalf("expected: %v, actual: %v", expectedSummary, summary) - } + must.Eq(t, summary, &expectedSummary) } func TestJobSummary_UpdateClientStatus(t *testing.T) { @@ -8202,13 +7325,9 @@ func TestJobSummary_UpdateClientStatus(t *testing.T) { alloc3.JobID = job.ID err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2, alloc3}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2, alloc3})) ws := memdb.NewWatchSet() summary, _ := state.JobSummaryByID(ws, job.Namespace, job.ID) @@ -8234,13 +7353,9 @@ func TestJobSummary_UpdateClientStatus(t *testing.T) { alloc6.JobID = alloc.JobID alloc6.ClientStatus = structs.AllocClientStatusRunning - if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc4, alloc5, alloc6}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1002, 
[]*structs.Allocation{alloc4, alloc5, alloc6})) - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID) if summary.Summary["web"].Running != 1 || summary.Summary["web"].Failed != 1 || summary.Summary["web"].Complete != 1 { @@ -8251,9 +7366,7 @@ func TestJobSummary_UpdateClientStatus(t *testing.T) { alloc7.Job = alloc.Job alloc7.JobID = alloc.JobID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc7}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc7})) summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID) if summary.Summary["web"].Starting != 1 || summary.Summary["web"].Running != 1 || summary.Summary["web"].Failed != 1 || summary.Summary["web"].Complete != 1 { t.Fatalf("bad job summary: %v", summary) @@ -8289,9 +7402,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Terminal(t *testing.T) { d := mock.Deployment() d.Status = structs.DeploymentStatusFailed - if err := state.UpsertDeployment(1, d); err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, state.UpsertDeployment(1, d)) // Update the deployment req := &structs.DeploymentStatusUpdateRequest{ @@ -8315,9 +7426,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_NonTerminal(t *testing.T) { // Insert a deployment d := mock.Deployment() - if err := state.UpsertDeployment(1, d); err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, state.UpsertDeployment(1, d)) // Create an eval and a job e := mock.Eval() @@ -8335,34 +7444,26 @@ func TestStateStore_UpsertDeploymentStatusUpdate_NonTerminal(t *testing.T) { Eval: e, } err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, req) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) // Check that the status was updated properly ws := memdb.NewWatchSet() dout, err 
:= state.DeploymentByID(ws, d.ID) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) if dout.Status != status || dout.StatusDescription != desc { t.Fatalf("bad: %#v", dout) } // Check that the evaluation was created eout, _ := state.EvalByID(ws, e.ID) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) if eout == nil { t.Fatalf("bad: %#v", eout) } // Check that the job was created jout, _ := state.JobByID(ws, j.Namespace, j.ID) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) if jout == nil { t.Fatalf("bad: %#v", jout) } @@ -8378,15 +7479,11 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) { // Insert a job job := mock.Job() - if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, job); err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, job)) // Insert a deployment d := structs.NewDeployment(job, 50, now) - if err := state.UpsertDeployment(2, d); err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, state.UpsertDeployment(2, d)) // Update the deployment req := &structs.DeploymentStatusUpdateRequest{ @@ -8397,16 +7494,12 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) { }, } err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 3, req) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) // Check that the status was updated properly ws := memdb.NewWatchSet() dout, err := state.DeploymentByID(ws, d.ID) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) if dout.Status != structs.DeploymentStatusSuccessful || dout.StatusDescription != structs.DeploymentStatusDescriptionSuccessful { t.Fatalf("bad: %#v", dout) @@ -8414,9 +7507,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) { // Check that the job was created jout, _ := state.JobByID(ws, job.Namespace, job.ID) - if err != nil { - t.Fatalf("bad: %v", 
err) - } + must.NoError(t, err) if jout == nil { t.Fatalf("bad: %#v", jout) } @@ -8435,32 +7526,30 @@ func TestStateStore_UpdateJobStability(t *testing.T) { // Insert a job twice to get two versions job := mock.Job() - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, job)) - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 2, nil, job.Copy())) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 2, nil, job.Copy())) // Update the stability to true err := state.UpdateJobStability(3, job.Namespace, job.ID, 0, true) - require.NoError(t, err) + must.NoError(t, err) // Check that the job was updated properly ws := memdb.NewWatchSet() jout, err := state.JobByIDAndVersion(ws, job.Namespace, job.ID, 0) - require.NoError(t, err) - require.NotNil(t, jout) - require.True(t, jout.Stable, "job not marked as stable") + must.NoError(t, err) + must.NotNil(t, jout) + must.True(t, jout.Stable, must.Sprint("job not marked as stable")) // Update the stability to false err = state.UpdateJobStability(3, job.Namespace, job.ID, 0, false) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) // Check that the job was updated properly jout, err = state.JobByIDAndVersion(ws, job.Namespace, job.ID, 0) - require.NoError(t, err) - require.NotNil(t, jout) - require.False(t, jout.Stable) + must.NoError(t, err) + must.NotNil(t, jout) + must.False(t, jout.Stable) } // Test that nonexistent deployment can't be promoted @@ -8789,9 +7878,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_Terminal(t *testing.T) { d := mock.Deployment() d.Status = structs.DeploymentStatusFailed - if err := state.UpsertDeployment(1, d); err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, state.UpsertDeployment(1, d)) // Set health against the terminal deployment req := &structs.ApplyDeploymentAllocHealthRequest{ @@ -8814,9 +7901,7 @@ func 
TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_Nonexistent(t *testing. // Insert a deployment d := mock.Deployment() - if err := state.UpsertDeployment(1, d); err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, state.UpsertDeployment(1, d)) // Set health against the terminal deployment req := &structs.ApplyDeploymentAllocHealthRequest{ @@ -8839,11 +7924,11 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { // Create a deployment d1 := mock.Deployment() - require.NoError(t, state.UpsertDeployment(2, d1)) + must.NoError(t, state.UpsertDeployment(2, d1)) // Create a Job job := mock.Job() - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 3, nil, job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 3, nil, job)) // Create alloc with canary status a := mock.Alloc() @@ -8853,15 +7938,15 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { Healthy: pointer.Of(false), Canary: true, } - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a})) // Pull the deployment from state ws := memdb.NewWatchSet() deploy, err := state.DeploymentByID(ws, d1.ID) - require.NoError(t, err) + must.NoError(t, err) // Ensure that PlacedCanaries is accurate - require.Equal(t, 1, len(deploy.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries)) + must.Eq(t, 1, len(deploy.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries)) // Create alloc without canary status b := mock.Alloc() @@ -8871,19 +7956,19 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { Healthy: pointer.Of(false), Canary: false, } - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{b})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{b})) // Pull the deployment from state ws = memdb.NewWatchSet() deploy, err = state.DeploymentByID(ws, 
d1.ID) - require.NoError(t, err) + must.NoError(t, err) // Ensure that PlacedCanaries is accurate - require.Equal(t, 1, len(deploy.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries)) + must.Eq(t, 1, len(deploy.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries)) // Create a second deployment d2 := mock.Deployment() - require.NoError(t, state.UpsertDeployment(5, d2)) + must.NoError(t, state.UpsertDeployment(5, d2)) c := mock.Alloc() c.JobID = job.ID @@ -8892,14 +7977,14 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { Healthy: pointer.Of(false), Canary: true, } - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 6, []*structs.Allocation{c})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 6, []*structs.Allocation{c})) ws = memdb.NewWatchSet() deploy2, err := state.DeploymentByID(ws, d2.ID) - require.NoError(t, err) + must.NoError(t, err) // Ensure that PlacedCanaries is accurate - require.Equal(t, 1, len(deploy2.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries)) + must.Eq(t, 1, len(deploy2.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries)) } func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { @@ -8909,11 +7994,11 @@ func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { // Create a deployment d1 := mock.Deployment() - require.NoError(t, state.UpsertDeployment(2, d1)) + must.NoError(t, state.UpsertDeployment(2, d1)) // Create a Job job := mock.Job() - require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 3, nil, job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 3, nil, job)) // Create alloc with canary status a := mock.Alloc() @@ -8923,15 +8008,15 @@ func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { Healthy: pointer.Of(true), Canary: false, } - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a})) + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a})) // Pull the 
deployment from state ws := memdb.NewWatchSet() deploy, err := state.DeploymentByID(ws, d1.ID) - require.NoError(t, err) + must.NoError(t, err) // Ensure that PlacedCanaries is accurate - require.Equal(t, 0, len(deploy.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries)) + must.Eq(t, 0, len(deploy.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries)) } // Test that allocation health can't be set for an alloc with mismatched @@ -8944,19 +8029,13 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_MismatchDeployment(t *t // Insert two deployment d1 := mock.Deployment() d2 := mock.Deployment() - if err := state.UpsertDeployment(1, d1); err != nil { - t.Fatalf("bad: %v", err) - } - if err := state.UpsertDeployment(2, d2); err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, state.UpsertDeployment(1, d1)) + must.NoError(t, state.UpsertDeployment(2, d2)) // Insert an alloc for a random deployment a := mock.Alloc() a.DeploymentID = d1.ID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{a}); err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{a})) // Set health against the terminal deployment req := &structs.ApplyDeploymentAllocHealthRequest{ @@ -8980,18 +8059,14 @@ func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) { // Insert a deployment d := mock.Deployment() d.TaskGroups["web"].ProgressDeadline = 5 * time.Minute - if err := state.UpsertDeployment(1, d); err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, state.UpsertDeployment(1, d)) // Insert two allocations a1 := mock.Alloc() a1.DeploymentID = d.ID a2 := mock.Alloc() a2.DeploymentID = d.ID - if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{a1, a2}); err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{a1, a2})) // Create a job to roll back to j := mock.Job() @@ 
-9023,47 +8098,35 @@ func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) { Timestamp: ts, } err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 3, req) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) // Check that the status was updated properly ws := memdb.NewWatchSet() dout, err := state.DeploymentByID(ws, d.ID) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) if dout.Status != status || dout.StatusDescription != desc { t.Fatalf("bad: %#v", dout) } // Check that the evaluation was created eout, _ := state.EvalByID(ws, e.ID) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) if eout == nil { t.Fatalf("bad: %#v", eout) } // Check that the job was created jout, _ := state.JobByID(ws, j.Namespace, j.ID) - if err != nil { - t.Fatalf("bad: %v", err) - } + must.NoError(t, err) if jout == nil { t.Fatalf("bad: %#v", jout) } // Check the status of the allocs out1, err := state.AllocByID(ws, a1.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) out2, err := state.AllocByID(ws, a2.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) if !out1.DeploymentStatus.IsHealthy() { t.Fatalf("bad: alloc %q not healthy", out1.ID) @@ -9088,33 +8151,25 @@ func TestStateStore_UpsertACLPolicy(t *testing.T) { policy2 := mock.ACLPolicy() ws := memdb.NewWatchSet() - if _, err := state.ACLPolicyByName(ws, policy.Name); err != nil { - t.Fatalf("err: %v", err) - } - if _, err := state.ACLPolicyByName(ws, policy2.Name); err != nil { - t.Fatalf("err: %v", err) - } + _, err := state.ACLPolicyByName(ws, policy.Name) + must.NoError(t, err) + _, err = state.ACLPolicyByName(ws, policy2.Name) + must.NoError(t, err) - if err := state.UpsertACLPolicies(structs.MsgTypeTestSetup, 1000, []*structs.ACLPolicy{policy, policy2}); err != nil { - t.Fatalf("err: %v", err) - } - if !watchFired(ws) { - t.Fatalf("bad") - } + must.NoError(t, 
state.UpsertACLPolicies(structs.MsgTypeTestSetup, 1000, []*structs.ACLPolicy{policy, policy2})) + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() out, err := state.ACLPolicyByName(ws, policy.Name) - assert.Equal(t, nil, err) - assert.Equal(t, policy, out) + must.NoError(t, err) + must.Eq(t, policy, out) out, err = state.ACLPolicyByName(ws, policy2.Name) - assert.Equal(t, nil, err) - assert.Equal(t, policy2, out) + must.NoError(t, err) + must.Eq(t, policy2, out) iter, err := state.ACLPolicies(ws) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) // Ensure we see both policies count := 0 @@ -9130,16 +8185,10 @@ func TestStateStore_UpsertACLPolicy(t *testing.T) { } index, err := state.Index("acl_policy") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1000 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1000, index) - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_ACLPolicyByNamespace(t *testing.T) { @@ -9199,63 +8248,33 @@ func TestStateStore_DeleteACLPolicy(t *testing.T) { policy2 := mock.ACLPolicy() // Create the policy - if err := state.UpsertACLPolicies(structs.MsgTypeTestSetup, 1000, []*structs.ACLPolicy{policy, policy2}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertACLPolicies(structs.MsgTypeTestSetup, 1000, []*structs.ACLPolicy{policy, policy2})) // Create a watcher ws := memdb.NewWatchSet() - if _, err := state.ACLPolicyByName(ws, policy.Name); err != nil { - t.Fatalf("err: %v", err) - } + _, err := state.ACLPolicyByName(ws, policy.Name) + must.NoError(t, err) // Delete the policy - if err := state.DeleteACLPolicies(structs.MsgTypeTestSetup, 1001, []string{policy.Name, policy2.Name}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.DeleteACLPolicies(structs.MsgTypeTestSetup, 1001, []string{policy.Name, 
policy2.Name})) // Ensure watching triggered - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) // Ensure we don't get the object back ws = memdb.NewWatchSet() out, err := state.ACLPolicyByName(ws, policy.Name) - assert.Equal(t, nil, err) - if out != nil { - t.Fatalf("bad: %#v", out) - } + must.NoError(t, err) + must.Nil(t, out) iter, err := state.ACLPolicies(ws) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Ensure we see neither policy - count := 0 - for { - raw := iter.Next() - if raw == nil { - break - } - count++ - } - if count != 0 { - t.Fatalf("bad: %d", count) - } + must.NoError(t, err) + must.Nil(t, iter.Next()) index, err := state.Index("acl_policy") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1001 { - t.Fatalf("bad: %d", index) - } - - if watchFired(ws) { - t.Fatalf("bad") - } + must.NoError(t, err) + must.Eq(t, 1001, index) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_ACLPolicyByNamePrefix(t *testing.T) { @@ -9275,17 +8294,13 @@ func TestStateStore_ACLPolicyByNamePrefix(t *testing.T) { for _, name := range names { p := mock.ACLPolicy() p.Name = name - if err := state.UpsertACLPolicies(structs.MsgTypeTestSetup, baseIndex, []*structs.ACLPolicy{p}); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.UpsertACLPolicies(structs.MsgTypeTestSetup, baseIndex, []*structs.ACLPolicy{p})) baseIndex++ } // Scan by prefix iter, err := state.ACLPolicyByNamePrefix(nil, "foo") - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) // Ensure we see both policies count := 0 @@ -9304,7 +8319,7 @@ func TestStateStore_ACLPolicyByNamePrefix(t *testing.T) { sort.Strings(out) expect := []string{"foo", "foobar", "foozip"} - assert.Equal(t, expect, out) + must.Eq(t, expect, out) } func TestStateStore_BootstrapACLTokens(t *testing.T) { @@ -9315,31 +8330,25 @@ func TestStateStore_BootstrapACLTokens(t 
*testing.T) { tk2 := mock.ACLToken() ok, resetIdx, err := state.CanBootstrapACLToken() - assert.Nil(t, err) - assert.Equal(t, true, ok) - assert.EqualValues(t, 0, resetIdx) + must.NoError(t, err) + must.Eq(t, true, ok) + test.Eq(t, 0, resetIdx) - if err := state.BootstrapACLTokens(structs.MsgTypeTestSetup, 1000, 0, tk1); err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, state.BootstrapACLTokens(structs.MsgTypeTestSetup, 1000, 0, tk1)) out, err := state.ACLTokenByAccessorID(nil, tk1.AccessorID) - assert.Equal(t, nil, err) - assert.Equal(t, tk1, out) + must.NoError(t, err) + must.Eq(t, tk1, out) ok, resetIdx, err = state.CanBootstrapACLToken() - assert.Nil(t, err) - assert.Equal(t, false, ok) - assert.EqualValues(t, 1000, resetIdx) + must.NoError(t, err) + must.Eq(t, false, ok) + test.Eq(t, 1000, resetIdx) - if err := state.BootstrapACLTokens(structs.MsgTypeTestSetup, 1001, 0, tk2); err == nil { - t.Fatalf("expected error") - } + must.Error(t, state.BootstrapACLTokens(structs.MsgTypeTestSetup, 1001, 0, tk2)) iter, err := state.ACLTokens(nil, SortDefault) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) // Ensure we see both policies count := 0 @@ -9350,45 +8359,25 @@ func TestStateStore_BootstrapACLTokens(t *testing.T) { } count++ } - if count != 1 { - t.Fatalf("bad: %d", count) - } + must.Eq(t, 1, count) index, err := state.Index("acl_token") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1000 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1000, index) index, err = state.Index("acl_token_bootstrap") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1000 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1000, index) // Should allow bootstrap with reset index - if err := state.BootstrapACLTokens(structs.MsgTypeTestSetup, 1001, 1000, tk2); err != nil { - t.Fatalf("err %v", err) - } + must.NoError(t, state.BootstrapACLTokens(structs.MsgTypeTestSetup, 1001, 1000, 
tk2)) // Check we've modified the index index, err = state.Index("acl_token") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1001 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1001, index) index, err = state.Index("acl_token_bootstrap") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1001 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1001, index) } func TestStateStore_UpsertACLTokens(t *testing.T) { @@ -9399,41 +8388,33 @@ func TestStateStore_UpsertACLTokens(t *testing.T) { tk2 := mock.ACLToken() ws := memdb.NewWatchSet() - if _, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID); err != nil { - t.Fatalf("err: %v", err) - } - if _, err := state.ACLTokenByAccessorID(ws, tk2.AccessorID); err != nil { - t.Fatalf("err: %v", err) - } + _, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID) + must.NoError(t, err) + _, err = state.ACLTokenByAccessorID(ws, tk2.AccessorID) + must.NoError(t, err) - if err := state.UpsertACLTokens(structs.MsgTypeTestSetup, 1000, []*structs.ACLToken{tk1, tk2}); err != nil { - t.Fatalf("err: %v", err) - } - if !watchFired(ws) { - t.Fatalf("bad") - } + must.NoError(t, state.UpsertACLTokens(structs.MsgTypeTestSetup, 1000, []*structs.ACLToken{tk1, tk2})) + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) ws = memdb.NewWatchSet() out, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID) - assert.Equal(t, nil, err) - assert.Equal(t, tk1, out) + must.NoError(t, err) + must.Eq(t, tk1, out) out, err = state.ACLTokenByAccessorID(ws, tk2.AccessorID) - assert.Equal(t, nil, err) - assert.Equal(t, tk2, out) + must.NoError(t, err) + must.Eq(t, tk2, out) out, err = state.ACLTokenBySecretID(ws, tk1.SecretID) - assert.Equal(t, nil, err) - assert.Equal(t, tk1, out) + must.NoError(t, err) + must.Eq(t, tk1, out) out, err = state.ACLTokenBySecretID(ws, tk2.SecretID) - assert.Equal(t, nil, err) - assert.Equal(t, tk2, out) + must.NoError(t, err) + must.Eq(t, tk2, 
out) iter, err := state.ACLTokens(ws, SortDefault) - if err != nil { - t.Fatalf("err: %v", err) - } + must.NoError(t, err) // Ensure we see both policies count := 0 @@ -9449,16 +8430,10 @@ func TestStateStore_UpsertACLTokens(t *testing.T) { } index, err := state.Index("acl_token") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1000 { - t.Fatalf("bad: %d", index) - } + must.NoError(t, err) + must.Eq(t, 1000, index) - if watchFired(ws) { - t.Fatalf("bad") - } + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_DeleteACLTokens(t *testing.T) { @@ -9469,63 +8444,35 @@ func TestStateStore_DeleteACLTokens(t *testing.T) { tk2 := mock.ACLToken() // Create the tokens - if err := state.UpsertACLTokens(structs.MsgTypeTestSetup, 1000, []*structs.ACLToken{tk1, tk2}); err != nil { - t.Fatalf("err: %v", err) - } + err := state.UpsertACLTokens(structs.MsgTypeTestSetup, 1000, []*structs.ACLToken{tk1, tk2}) + must.NoError(t, err) // Create a watcher ws := memdb.NewWatchSet() - if _, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID); err != nil { - t.Fatalf("err: %v", err) - } + _, err = state.ACLTokenByAccessorID(ws, tk1.AccessorID) + must.NoError(t, err) // Delete the token - if err := state.DeleteACLTokens(structs.MsgTypeTestSetup, 1001, []string{tk1.AccessorID, tk2.AccessorID}); err != nil { - t.Fatalf("err: %v", err) - } + err = state.DeleteACLTokens(structs.MsgTypeTestSetup, 1001, []string{tk1.AccessorID, tk2.AccessorID}) + must.NoError(t, err) // Ensure watching triggered - if !watchFired(ws) { - t.Fatalf("bad") - } + must.True(t, watchFired(ws), must.Sprint("expected watch to fire")) // Ensure we don't get the object back ws = memdb.NewWatchSet() out, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID) - assert.Equal(t, nil, err) - if out != nil { - t.Fatalf("bad: %#v", out) - } + must.NoError(t, err) + must.Nil(t, out) iter, err := state.ACLTokens(ws, SortDefault) - if err != nil { - t.Fatalf("err: %v", err) 
- } - - // Ensure we see both policies - count := 0 - for { - raw := iter.Next() - if raw == nil { - break - } - count++ - } - if count != 0 { - t.Fatalf("bad: %d", count) - } + must.NoError(t, err) + must.Nil(t, iter.Next()) index, err := state.Index("acl_token") - if err != nil { - t.Fatalf("err: %v", err) - } - if index != 1001 { - t.Fatalf("bad: %d", index) - } - - if watchFired(ws) { - t.Fatalf("bad") - } + must.NoError(t, err) + must.Eq(t, 1001, index) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) } func TestStateStore_ACLTokenByAccessorIDPrefix(t *testing.T) { @@ -9546,7 +8493,7 @@ func TestStateStore_ACLTokenByAccessorIDPrefix(t *testing.T) { tk := mock.ACLToken() tk.AccessorID = prefix + tk.AccessorID[4:] err := state.UpsertACLTokens(structs.MsgTypeTestSetup, baseIndex, []*structs.ACLToken{tk}) - require.NoError(t, err) + must.NoError(t, err) baseIndex++ } @@ -9564,34 +8511,34 @@ func TestStateStore_ACLTokenByAccessorIDPrefix(t *testing.T) { t.Run("scan by prefix", func(t *testing.T) { iter, err := state.ACLTokenByAccessorIDPrefix(nil, "aa", SortDefault) - require.NoError(t, err) + must.NoError(t, err) // Ensure we see both tokens out := gatherTokens(iter) - require.Len(t, out, 2) + must.Len(t, 2, out) got := []string{} for _, t := range out { got = append(got, t.AccessorID[:4]) } expect := []string{"aaaa", "aabb"} - require.Equal(t, expect, got) + must.Eq(t, expect, got) }) t.Run("reverse order", func(t *testing.T) { iter, err := state.ACLTokenByAccessorIDPrefix(nil, "aa", SortReverse) - require.NoError(t, err) + must.NoError(t, err) // Ensure we see both tokens out := gatherTokens(iter) - require.Len(t, out, 2) + must.Len(t, 2, out) got := []string{} for _, t := range out { got = append(got, t.AccessorID[:4]) } expect := []string{"aabb", "aaaa"} - require.Equal(t, expect, got) + must.Eq(t, expect, got) }) } @@ -9613,7 +8560,7 @@ func TestStateStore_ACLTokensByGlobal(t *testing.T) { tk4.AccessorID = "ffff" + 
tk4.AccessorID[4:] err := state.UpsertACLTokens(structs.MsgTypeTestSetup, 1000, []*structs.ACLToken{tk1, tk2, tk3, tk4}) - require.NoError(t, err) + must.NoError(t, err) gatherTokens := func(iter memdb.ResultIterator) []*structs.ACLToken { var tokens []*structs.ACLToken @@ -9629,21 +8576,21 @@ func TestStateStore_ACLTokensByGlobal(t *testing.T) { t.Run("only global tokens", func(t *testing.T) { iter, err := state.ACLTokensByGlobal(nil, true, SortDefault) - require.NoError(t, err) + must.NoError(t, err) got := gatherTokens(iter) - require.Len(t, got, 1) - require.Equal(t, tk3.AccessorID, got[0].AccessorID) + must.Len(t, 1, got) + must.Eq(t, tk3.AccessorID, got[0].AccessorID) }) t.Run("reverse order", func(t *testing.T) { iter, err := state.ACLTokensByGlobal(nil, false, SortReverse) - require.NoError(t, err) + must.NoError(t, err) expected := []*structs.ACLToken{tk4, tk2, tk1} got := gatherTokens(iter) - require.Len(t, got, 3) - require.Equal(t, expected, got) + must.Len(t, 3, got) + must.Eq(t, expected, got) }) } @@ -9658,7 +8605,7 @@ func TestStateStore_OneTimeTokens(t *testing.T) { token2 := mock.ACLToken() token3 := mock.ACLToken() index++ - require.Nil(t, state.UpsertACLTokens( + must.Nil(t, state.UpsertACLTokens( structs.MsgTypeTestSetup, index, []*structs.ACLToken{token1, token2, token3})) @@ -9698,14 +8645,14 @@ func TestStateStore_OneTimeTokens(t *testing.T) { for _, ott := range otts { index++ - require.NoError(t, state.UpsertOneTimeToken(structs.MsgTypeTestSetup, index, ott)) + must.NoError(t, state.UpsertOneTimeToken(structs.MsgTypeTestSetup, index, ott)) } // verify that we have exactly one OTT for each AccessorID txn := state.db.ReadTxn() iter, err := txn.Get("one_time_token", "id") - require.NoError(t, err) + must.NoError(t, err) results := []*structs.OneTimeToken{} for { raw := iter.Next() @@ -9713,25 +8660,25 @@ func TestStateStore_OneTimeTokens(t *testing.T) { break } ott, ok := raw.(*structs.OneTimeToken) - require.True(t, ok) + must.True(t, ok) 
results = append(results, ott) } // results aren't ordered but if we have 3 OTT and all 3 tokens, we know // we have no duplicate accessors - require.Len(t, results, 3) + must.Len(t, 3, results) accessors := []string{ results[0].AccessorID, results[1].AccessorID, results[2].AccessorID} - require.Contains(t, accessors, token1.AccessorID) - require.Contains(t, accessors, token2.AccessorID) - require.Contains(t, accessors, token3.AccessorID) + must.SliceContains(t, accessors, token1.AccessorID) + must.SliceContains(t, accessors, token2.AccessorID) + must.SliceContains(t, accessors, token3.AccessorID) // now verify expiration getExpiredTokens := func(now time.Time) []*structs.OneTimeToken { txn := state.db.ReadTxn() iter, err := state.oneTimeTokensExpiredTxn(txn, nil, now) - require.NoError(t, err) + must.NoError(t, err) results := []*structs.OneTimeToken{} for { @@ -9740,49 +8687,48 @@ func TestStateStore_OneTimeTokens(t *testing.T) { break } ott, ok := raw.(*structs.OneTimeToken) - require.True(t, ok) + must.True(t, ok) results = append(results, ott) } return results } results = getExpiredTokens(time.Now()) - require.Len(t, results, 2) + must.Len(t, 2, results) // results aren't ordered expiredAccessors := []string{results[0].AccessorID, results[1].AccessorID} - require.Contains(t, expiredAccessors, token1.AccessorID) - require.Contains(t, expiredAccessors, token2.AccessorID) - require.True(t, time.Now().After(results[0].ExpiresAt)) - require.True(t, time.Now().After(results[1].ExpiresAt)) + must.SliceContains(t, expiredAccessors, token1.AccessorID) + must.SliceContains(t, expiredAccessors, token2.AccessorID) + must.True(t, time.Now().After(results[0].ExpiresAt)) + must.True(t, time.Now().After(results[1].ExpiresAt)) // clear the expired tokens and verify they're gone index++ - require.NoError(t, state.ExpireOneTimeTokens( + must.NoError(t, state.ExpireOneTimeTokens( structs.MsgTypeTestSetup, index, time.Now())) results = getExpiredTokens(time.Now()) - require.Len(t, 
results, 0) + must.Len(t, 0, results) // query the unexpired token ott, err := state.OneTimeTokenBySecret(nil, otts[len(otts)-1].OneTimeSecretID) - require.NoError(t, err) - require.Equal(t, token3.AccessorID, ott.AccessorID) - require.True(t, time.Now().Before(ott.ExpiresAt)) + must.NoError(t, err) + must.Eq(t, token3.AccessorID, ott.AccessorID) + must.True(t, time.Now().Before(ott.ExpiresAt)) restore, err := state.Restore() - require.NoError(t, err) + must.NoError(t, err) err = restore.OneTimeTokenRestore(ott) - require.NoError(t, err) - require.NoError(t, restore.Commit()) + must.NoError(t, err) + must.NoError(t, restore.Commit()) ott, err = state.OneTimeTokenBySecret(nil, otts[len(otts)-1].OneTimeSecretID) - require.NoError(t, err) - require.Equal(t, token3.AccessorID, ott.AccessorID) + must.NoError(t, err) + must.Eq(t, token3.AccessorID, ott.AccessorID) } func TestStateStore_ClusterMetadata(t *testing.T) { - require := require.New(t) state := testStateStore(t) clusterID := "12345678-1234-1234-1234-1234567890" @@ -9790,17 +8736,16 @@ func TestStateStore_ClusterMetadata(t *testing.T) { meta := &structs.ClusterMetadata{ClusterID: clusterID, CreateTime: now} err := state.ClusterSetMetadata(100, meta) - require.NoError(err) + must.NoError(t, err) result, err := state.ClusterMetadata(nil) - require.NoError(err) - require.Equal(clusterID, result.ClusterID) - require.Equal(now, result.CreateTime) + must.NoError(t, err) + must.Eq(t, clusterID, result.ClusterID) + must.Eq(t, now, result.CreateTime) } func TestStateStore_UpsertScalingPolicy(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) policy := mock.ScalingPolicy() @@ -9808,31 +8753,31 @@ func TestStateStore_UpsertScalingPolicy(t *testing.T) { wsAll := memdb.NewWatchSet() all, err := state.ScalingPolicies(wsAll) - require.NoError(err) - require.Nil(all.Next()) + must.NoError(t, err) + must.Nil(t, all.Next()) ws := memdb.NewWatchSet() out, err := 
state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type) - require.NoError(err) - require.Nil(out) + must.NoError(t, err) + must.Nil(t, out) out, err = state.ScalingPolicyByTargetAndType(ws, policy2.Target, policy2.Type) - require.NoError(err) - require.Nil(out) + must.NoError(t, err) + must.Nil(t, out) err = state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy, policy2}) - require.NoError(err) - require.True(watchFired(ws)) - require.True(watchFired(wsAll)) + must.NoError(t, err) + must.True(t, watchFired(ws)) + must.True(t, watchFired(wsAll)) ws = memdb.NewWatchSet() out, err = state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type) - require.NoError(err) - require.Equal(policy, out) + must.NoError(t, err) + must.Eq(t, policy, out) out, err = state.ScalingPolicyByTargetAndType(ws, policy2.Target, policy2.Type) - require.NoError(err) - require.Equal(policy2, out) + must.NoError(t, err) + must.Eq(t, policy2, out) // Ensure we see both policies countPolicies := func() (n int, err error) { @@ -9848,13 +8793,13 @@ func TestStateStore_UpsertScalingPolicy(t *testing.T) { } count, err := countPolicies() - require.NoError(err) - require.Equal(2, count) + must.NoError(t, err) + must.Eq(t, 2, count) index, err := state.Index("scaling_policy") - require.NoError(err) - require.True(1000 == index) - require.False(watchFired(ws)) + must.NoError(t, err) + must.True(t, 1000 == index) + must.False(t, watchFired(ws)) // Check that we can add policy with same target but different type policy3 := mock.ScalingPolicy() @@ -9863,27 +8808,26 @@ func TestStateStore_UpsertScalingPolicy(t *testing.T) { } err = state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy3}) - require.NoError(err) + must.NoError(t, err) // Ensure we see both policies, since target didn't change count, err = countPolicies() - require.NoError(err) - require.Equal(2, count) + must.NoError(t, err) + must.Eq(t, 2, count) // Change type and check if we see 3 policy3.Type = 
"other-type" err = state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy3}) - require.NoError(err) + must.NoError(t, err) count, err = countPolicies() - require.NoError(err) - require.Equal(3, count) + must.NoError(t, err) + must.Eq(t, 3, count) } func TestStateStore_UpsertScalingPolicy_Namespace(t *testing.T) { ci.Parallel(t) - require := require.New(t) otherNamespace := "not-default-namespace" state := testStateStore(t) @@ -9893,21 +8837,21 @@ func TestStateStore_UpsertScalingPolicy_Namespace(t *testing.T) { ws1 := memdb.NewWatchSet() iter, err := state.ScalingPoliciesByNamespace(ws1, structs.DefaultNamespace, "") - require.NoError(err) - require.Nil(iter.Next()) + must.NoError(t, err) + must.Nil(t, iter.Next()) ws2 := memdb.NewWatchSet() iter, err = state.ScalingPoliciesByNamespace(ws2, otherNamespace, "") - require.NoError(err) - require.Nil(iter.Next()) + must.NoError(t, err) + must.Nil(t, iter.Next()) err = state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy, policy2}) - require.NoError(err) - require.True(watchFired(ws1)) - require.True(watchFired(ws2)) + must.NoError(t, err) + must.True(t, watchFired(ws1)) + must.True(t, watchFired(ws2)) iter, err = state.ScalingPoliciesByNamespace(nil, structs.DefaultNamespace, "") - require.NoError(err) + must.NoError(t, err) policiesInDefaultNamespace := []string{} for { raw := iter.Next() @@ -9916,10 +8860,10 @@ func TestStateStore_UpsertScalingPolicy_Namespace(t *testing.T) { } policiesInDefaultNamespace = append(policiesInDefaultNamespace, raw.(*structs.ScalingPolicy).ID) } - require.ElementsMatch([]string{policy.ID}, policiesInDefaultNamespace) + must.SliceContainsAll(t, []string{policy.ID}, policiesInDefaultNamespace) iter, err = state.ScalingPoliciesByNamespace(nil, otherNamespace, "") - require.NoError(err) + must.NoError(t, err) policiesInOtherNamespace := []string{} for { raw := iter.Next() @@ -9928,12 +8872,11 @@ func TestStateStore_UpsertScalingPolicy_Namespace(t *testing.T) { } 
policiesInOtherNamespace = append(policiesInOtherNamespace, raw.(*structs.ScalingPolicy).ID) } - require.ElementsMatch([]string{policy2.ID}, policiesInOtherNamespace) + must.SliceContainsAll(t, []string{policy2.ID}, policiesInOtherNamespace) } func TestStateStore_UpsertScalingPolicy_Namespace_PrefixBug(t *testing.T) { ci.Parallel(t) - require := require.New(t) ns1 := "name" ns2 := "name2" // matches prefix "name" @@ -9945,21 +8888,21 @@ func TestStateStore_UpsertScalingPolicy_Namespace_PrefixBug(t *testing.T) { ws1 := memdb.NewWatchSet() iter, err := state.ScalingPoliciesByNamespace(ws1, ns1, "") - require.NoError(err) - require.Nil(iter.Next()) + must.NoError(t, err) + must.Nil(t, iter.Next()) ws2 := memdb.NewWatchSet() iter, err = state.ScalingPoliciesByNamespace(ws2, ns2, "") - require.NoError(err) - require.Nil(iter.Next()) + must.NoError(t, err) + must.Nil(t, iter.Next()) err = state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy1, policy2}) - require.NoError(err) - require.True(watchFired(ws1)) - require.True(watchFired(ws2)) + must.NoError(t, err) + must.True(t, watchFired(ws1)) + must.True(t, watchFired(ws2)) iter, err = state.ScalingPoliciesByNamespace(nil, ns1, "") - require.NoError(err) + must.NoError(t, err) policiesInNS1 := []string{} for { raw := iter.Next() @@ -9968,10 +8911,10 @@ func TestStateStore_UpsertScalingPolicy_Namespace_PrefixBug(t *testing.T) { } policiesInNS1 = append(policiesInNS1, raw.(*structs.ScalingPolicy).ID) } - require.ElementsMatch([]string{policy1.ID}, policiesInNS1) + must.SliceContainsAll(t, []string{policy1.ID}, policiesInNS1) iter, err = state.ScalingPoliciesByNamespace(nil, ns2, "") - require.NoError(err) + must.NoError(t, err) policiesInNS2 := []string{} for { raw := iter.Next() @@ -9980,7 +8923,7 @@ func TestStateStore_UpsertScalingPolicy_Namespace_PrefixBug(t *testing.T) { } policiesInNS2 = append(policiesInNS2, raw.(*structs.ScalingPolicy).ID) } - require.ElementsMatch([]string{policy2.ID}, policiesInNS2) + 
must.SliceContainsAll(t, []string{policy2.ID}, policiesInNS2) } // Scaling Policy IDs are generated randomly during Job.Register @@ -9989,44 +8932,42 @@ func TestStateStore_UpsertScalingPolicy_Namespace_PrefixBug(t *testing.T) { func TestStateStore_UpsertJob_PreserveScalingPolicyIDsAndIndex(t *testing.T) { ci.Parallel(t) - require := require.New(t) - state := testStateStore(t) job, policy := mock.JobWithScalingPolicy() var newIndex uint64 = 1000 err := state.UpsertJob(structs.MsgTypeTestSetup, newIndex, nil, job) - require.NoError(err) + must.NoError(t, err) ws := memdb.NewWatchSet() p1, err := state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type) - require.NoError(err) - require.NotNil(p1) - require.Equal(newIndex, p1.CreateIndex) - require.Equal(newIndex, p1.ModifyIndex) + must.NoError(t, err) + must.NotNil(t, p1) + must.Eq(t, newIndex, p1.CreateIndex) + must.Eq(t, newIndex, p1.ModifyIndex) index, err := state.Index("scaling_policy") - require.NoError(err) - require.Equal(newIndex, index) - require.NotEmpty(p1.ID) + must.NoError(t, err) + must.Eq(t, newIndex, index) + must.NotEq(t, "", p1.ID) // update the job job.Meta["new-meta"] = "new-value" newIndex += 100 err = state.UpsertJob(structs.MsgTypeTestSetup, newIndex, nil, job) - require.NoError(err) - require.False(watchFired(ws), "watch should not have fired") + must.NoError(t, err) + must.False(t, watchFired(ws), must.Sprint("watch should not have fired")) p2, err := state.ScalingPolicyByTargetAndType(nil, policy.Target, policy.Type) - require.NoError(err) - require.NotNil(p2) - require.Equal(p1.ID, p2.ID, "ID should not have changed") - require.Equal(p1.CreateIndex, p2.CreateIndex) - require.Equal(p1.ModifyIndex, p2.ModifyIndex) + must.NoError(t, err) + must.NotNil(t, p2) + must.Eq(t, p1.ID, p2.ID, must.Sprint("ID should not have changed")) + must.Eq(t, p1.CreateIndex, p2.CreateIndex) + must.Eq(t, p1.ModifyIndex, p2.ModifyIndex) index, err = state.Index("scaling_policy") - require.NoError(err) - 
require.Equal(index, p1.CreateIndex, "table index should not have changed") + must.NoError(t, err) + must.Eq(t, index, p1.CreateIndex, must.Sprint("table index should not have changed")) } // Updating the scaling policy for a job should update the index table and fire the watch. @@ -10034,86 +8975,82 @@ func TestStateStore_UpsertJob_PreserveScalingPolicyIDsAndIndex(t *testing.T) { func TestStateStore_UpsertJob_UpdateScalingPolicy(t *testing.T) { ci.Parallel(t) - require := require.New(t) - state := testStateStore(t) job, policy := mock.JobWithScalingPolicy() var oldIndex uint64 = 1000 - require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, oldIndex, nil, job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, oldIndex, nil, job)) ws := memdb.NewWatchSet() p1, err := state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type) - require.NoError(err) - require.NotNil(p1) - require.Equal(oldIndex, p1.CreateIndex) - require.Equal(oldIndex, p1.ModifyIndex) + must.NoError(t, err) + must.NotNil(t, p1) + must.Eq(t, oldIndex, p1.CreateIndex) + must.Eq(t, oldIndex, p1.ModifyIndex) prevId := p1.ID index, err := state.Index("scaling_policy") - require.NoError(err) - require.Equal(oldIndex, index) - require.NotEmpty(p1.ID) + must.NoError(t, err) + must.Eq(t, oldIndex, index) + must.NotEq(t, "", p1.ID) // update the job with the updated scaling policy; make sure to use a different object newPolicy := p1.Copy() newPolicy.Policy["new-field"] = "new-value" job.TaskGroups[0].Scaling = newPolicy - require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, oldIndex+100, nil, job)) - require.True(watchFired(ws), "watch should have fired") + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, oldIndex+100, nil, job)) + must.True(t, watchFired(ws), must.Sprint("watch should have fired")) p2, err := state.ScalingPolicyByTargetAndType(nil, policy.Target, policy.Type) - require.NoError(err) - require.NotNil(p2) - require.Equal(p2.Policy["new-field"], "new-value") - 
require.Equal(prevId, p2.ID, "ID should not have changed") - require.Equal(oldIndex, p2.CreateIndex) - require.Greater(p2.ModifyIndex, oldIndex, "ModifyIndex should have advanced") + must.NoError(t, err) + must.NotNil(t, p2) + must.Eq(t, p2.Policy["new-field"], "new-value") + must.Eq(t, prevId, p2.ID, must.Sprint("ID should not have changed")) + must.Eq(t, oldIndex, p2.CreateIndex) + must.Greater(t, oldIndex, p2.ModifyIndex, must.Sprint("ModifyIndex should have advanced")) index, err = state.Index("scaling_policy") - require.NoError(err) - require.Greater(index, oldIndex, "table index should have advanced") + must.NoError(t, err) + must.Greater(t, oldIndex, index, must.Sprint("table index should have advanced")) } func TestStateStore_DeleteScalingPolicies(t *testing.T) { ci.Parallel(t) - require := require.New(t) - state := testStateStore(t) policy := mock.ScalingPolicy() policy2 := mock.ScalingPolicy() // Create the policy err := state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy, policy2}) - require.NoError(err) + must.NoError(t, err) // Create a watcher ws := memdb.NewWatchSet() _, err = state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type) - require.NoError(err) + must.NoError(t, err) // Delete the policy err = state.DeleteScalingPolicies(1001, []string{policy.ID, policy2.ID}) - require.NoError(err) + must.NoError(t, err) // Ensure watching triggered - require.True(watchFired(ws)) + must.True(t, watchFired(ws)) // Ensure we don't get the objects back ws = memdb.NewWatchSet() out, err := state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type) - require.NoError(err) - require.Nil(out) + must.NoError(t, err) + must.Nil(t, out) ws = memdb.NewWatchSet() out, err = state.ScalingPolicyByTargetAndType(ws, policy2.Target, policy2.Type) - require.NoError(err) - require.Nil(out) + must.NoError(t, err) + must.Nil(t, out) // Ensure we see both policies iter, err := state.ScalingPoliciesByNamespace(ws, 
policy.Target[structs.ScalingTargetNamespace], "") - require.NoError(err) + must.NoError(t, err) count := 0 for { raw := iter.Next() @@ -10122,66 +9059,62 @@ func TestStateStore_DeleteScalingPolicies(t *testing.T) { } count++ } - require.Equal(0, count) + must.Eq(t, 0, count) index, err := state.Index("scaling_policy") - require.NoError(err) - require.True(1001 == index) - require.False(watchFired(ws)) + must.NoError(t, err) + must.True(t, 1001 == index) + must.False(t, watchFired(ws)) } func TestStateStore_StopJob_DeleteScalingPolicies(t *testing.T) { ci.Parallel(t) - require := require.New(t) - state := testStateStore(t) job := mock.Job() err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) - require.NoError(err) + must.NoError(t, err) policy := mock.ScalingPolicy() policy.Target[structs.ScalingTargetJob] = job.ID err = state.UpsertScalingPolicies(1100, []*structs.ScalingPolicy{policy}) - require.NoError(err) + must.NoError(t, err) // Ensure the scaling policy is present and start some watches wsGet := memdb.NewWatchSet() out, err := state.ScalingPolicyByTargetAndType(wsGet, policy.Target, policy.Type) - require.NoError(err) - require.NotNil(out) + must.NoError(t, err) + must.NotNil(t, out) wsList := memdb.NewWatchSet() _, err = state.ScalingPolicies(wsList) - require.NoError(err) + must.NoError(t, err) // Stop the job job, err = state.JobByID(nil, job.Namespace, job.ID) - require.NoError(err) + must.NoError(t, err) job.Stop = true err = state.UpsertJob(structs.MsgTypeTestSetup, 1200, nil, job) - require.NoError(err) + must.NoError(t, err) // Ensure: // * the scaling policy was deleted // * the watches were fired // * the table index was advanced - require.True(watchFired(wsGet)) - require.True(watchFired(wsList)) + must.True(t, watchFired(wsGet)) + must.True(t, watchFired(wsList)) out, err = state.ScalingPolicyByTargetAndType(nil, policy.Target, policy.Type) - require.NoError(err) - require.Nil(out) + must.NoError(t, err) + must.Nil(t, out) index, 
err := state.Index("scaling_policy") - require.NoError(err) - require.GreaterOrEqual(index, uint64(1200)) + must.NoError(t, err) + must.GreaterEq(t, uint64(1200), index) } func TestStateStore_UnstopJob_UpsertScalingPolicies(t *testing.T) { ci.Parallel(t) - require := require.New(t) - state := testStateStore(t) job, policy := mock.JobWithScalingPolicy() @@ -10190,96 +9123,92 @@ func TestStateStore_UnstopJob_UpsertScalingPolicies(t *testing.T) { // establish watcher, verify there are no scaling policies yet ws := memdb.NewWatchSet() list, err := state.ScalingPolicies(ws) - require.NoError(err) - require.Nil(list.Next()) + must.NoError(t, err) + must.Nil(t, list.Next()) // upsert a stopped job, verify that we don't fire the watcher or add any scaling policies err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) - require.NoError(err) - require.True(watchFired(ws)) + must.NoError(t, err) + must.True(t, watchFired(ws)) list, err = state.ScalingPolicies(ws) - require.NoError(err) - require.NotNil(list.Next()) + must.NoError(t, err) + must.NotNil(t, list.Next()) // Establish a new watchset ws = memdb.NewWatchSet() _, err = state.ScalingPolicies(ws) - require.NoError(err) + must.NoError(t, err) // Unstop this job, say you'll run it again... 
job.Stop = false err = state.UpsertJob(structs.MsgTypeTestSetup, 1100, nil, job) - require.NoError(err) + must.NoError(t, err) // Ensure the scaling policy still exists, watch was not fired, index was not advanced out, err := state.ScalingPolicyByTargetAndType(nil, policy.Target, policy.Type) - require.NoError(err) - require.NotNil(out) + must.NoError(t, err) + must.NotNil(t, out) index, err := state.Index("scaling_policy") - require.NoError(err) - require.EqualValues(index, 1000) - require.False(watchFired(ws)) + must.NoError(t, err) + must.Eq(t, index, 1000) + must.False(t, watchFired(ws)) } func TestStateStore_DeleteJob_DeleteScalingPolicies(t *testing.T) { ci.Parallel(t) - require := require.New(t) - state := testStateStore(t) job := mock.Job() err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) - require.NoError(err) + must.NoError(t, err) policy := mock.ScalingPolicy() policy.Target[structs.ScalingTargetJob] = job.ID err = state.UpsertScalingPolicies(1001, []*structs.ScalingPolicy{policy}) - require.NoError(err) + must.NoError(t, err) // Delete the job err = state.DeleteJob(1002, job.Namespace, job.ID) - require.NoError(err) + must.NoError(t, err) // Ensure the scaling policy was deleted ws := memdb.NewWatchSet() out, err := state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type) - require.NoError(err) - require.Nil(out) + must.NoError(t, err) + must.Nil(t, out) index, err := state.Index("scaling_policy") - require.NoError(err) - require.True(index > 1001) + must.NoError(t, err) + must.True(t, index > 1001) } func TestStateStore_DeleteJob_DeleteScalingPoliciesPrefixBug(t *testing.T) { ci.Parallel(t) - require := require.New(t) - state := testStateStore(t) job := mock.Job() - require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) job2 := job.Copy() job2.ID = job.ID + "-but-longer" - require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 
1001, nil, job2)) + must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2)) policy := mock.ScalingPolicy() policy.Target[structs.ScalingTargetJob] = job.ID policy2 := mock.ScalingPolicy() policy2.Target[structs.ScalingTargetJob] = job2.ID - require.NoError(state.UpsertScalingPolicies(1002, []*structs.ScalingPolicy{policy, policy2})) + must.NoError(t, state.UpsertScalingPolicies(1002, []*structs.ScalingPolicy{policy, policy2})) // Delete job with the shorter prefix-ID - require.NoError(state.DeleteJob(1003, job.Namespace, job.ID)) + must.NoError(t, state.DeleteJob(1003, job.Namespace, job.ID)) // Ensure only the associated scaling policy was deleted, not the one matching the job with the longer ID out, err := state.ScalingPolicyByID(nil, policy.ID) - require.NoError(err) - require.Nil(out) + must.NoError(t, err) + must.Nil(t, out) out, err = state.ScalingPolicyByID(nil, policy2.ID) - require.NoError(err) - require.NotNil(out) + must.NoError(t, err) + must.NotNil(t, out) } // This test ensures that deleting a job that doesn't have any scaling policies @@ -10288,36 +9217,32 @@ func TestStateStore_DeleteJob_DeleteScalingPoliciesPrefixBug(t *testing.T) { func TestStateStore_DeleteJob_ScalingPolicyIndexNoop(t *testing.T) { ci.Parallel(t) - require := require.New(t) - state := testStateStore(t) job := mock.Job() prevIndex, err := state.Index("scaling_policy") - require.NoError(err) + must.NoError(t, err) err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) - require.NoError(err) + must.NoError(t, err) newIndex, err := state.Index("scaling_policy") - require.NoError(err) - require.Equal(prevIndex, newIndex) + must.NoError(t, err) + must.Eq(t, prevIndex, newIndex) // Delete the job err = state.DeleteJob(1002, job.Namespace, job.ID) - require.NoError(err) + must.NoError(t, err) newIndex, err = state.Index("scaling_policy") - require.NoError(err) - require.Equal(prevIndex, newIndex) + must.NoError(t, err) + must.Eq(t, prevIndex, newIndex) } func 
TestStateStore_ScalingPoliciesByType(t *testing.T) { ci.Parallel(t) - require := require.New(t) - state := testStateStore(t) // Create scaling policies of different types @@ -10333,10 +9258,10 @@ func TestStateStore_ScalingPoliciesByType(t *testing.T) { pOther2.Type = "other-type-2" // Create search routine - search := func(t string) (found []string) { + search := func(ty string) (found []string) { found = []string{} - iter, err := state.ScalingPoliciesByTypePrefix(nil, t) - require.NoError(err) + iter, err := state.ScalingPoliciesByTypePrefix(nil, ty) + must.NoError(t, err) for raw := iter.Next(); raw != nil; raw = iter.Next() { found = append(found, raw.(*structs.ScalingPolicy).Type) @@ -10347,34 +9272,32 @@ func TestStateStore_ScalingPoliciesByType(t *testing.T) { // Create the policies var baseIndex uint64 = 1000 err := state.UpsertScalingPolicies(baseIndex, []*structs.ScalingPolicy{pHorzA, pHorzB, pOther1, pOther2}) - require.NoError(err) + must.NoError(t, err) // Check if we can read horizontal policies expect := []string{pHorzA.Type, pHorzB.Type} actual := search(structs.ScalingPolicyTypeHorizontal) - require.ElementsMatch(expect, actual) + must.SliceContainsAll(t, expect, actual) // Check if we can read policies of other types expect = []string{pOther1.Type} actual = search("other-type-1") - require.ElementsMatch(expect, actual) + must.SliceContainsAll(t, expect, actual) // Check that we can read policies by prefix expect = []string{"other-type-1", "other-type-2"} actual = search("other-type") - require.Equal(expect, actual) + must.Eq(t, expect, actual) // Check for empty result expect = []string{} actual = search("non-existing") - require.ElementsMatch(expect, actual) + must.SliceContainsAll(t, expect, actual) } func TestStateStore_ScalingPoliciesByTypePrefix(t *testing.T) { ci.Parallel(t) - require := require.New(t) - state := testStateStore(t) // Create scaling policies of different types @@ -10407,7 +9330,7 @@ func 
TestStateStore_ScalingPoliciesByTypePrefix(t *testing.T) { // Create the policies var baseIndex uint64 = 1000 err := state.UpsertScalingPolicies(baseIndex, []*structs.ScalingPolicy{pHorzA, pHorzB, pOther1, pOther2}) - require.NoError(err) + must.NoError(t, err) // Check if we can read horizontal policies expect := []string{pHorzA.Type, pHorzB.Type} @@ -10416,9 +9339,9 @@ func TestStateStore_ScalingPoliciesByTypePrefix(t *testing.T) { sort.Strings(found) sort.Strings(expect) - require.NoError(err) - require.Equal(expect, found) - require.Equal(2, count) + must.NoError(t, err) + must.Eq(t, expect, found) + must.Eq(t, 2, count) // Check if we can read other prefix policies expect = []string{pOther1.Type, pOther2.Type} @@ -10427,9 +9350,9 @@ func TestStateStore_ScalingPoliciesByTypePrefix(t *testing.T) { sort.Strings(found) sort.Strings(expect) - require.NoError(err) - require.Equal(expect, found) - require.Equal(2, count) + must.NoError(t, err) + must.Eq(t, expect, found) + must.Eq(t, 2, count) // Check for empty result expect = []string{} @@ -10438,16 +9361,14 @@ func TestStateStore_ScalingPoliciesByTypePrefix(t *testing.T) { sort.Strings(found) sort.Strings(expect) - require.NoError(err) - require.Equal(expect, found) - require.Equal(0, count) + must.NoError(t, err) + must.Eq(t, expect, found) + must.Eq(t, 0, count) } func TestStateStore_ScalingPoliciesByJob(t *testing.T) { ci.Parallel(t) - require := require.New(t) - state := testStateStore(t) policyA := mock.ScalingPolicy() policyB1 := mock.ScalingPolicy() @@ -10457,12 +9378,12 @@ func TestStateStore_ScalingPoliciesByJob(t *testing.T) { // Create the policies var baseIndex uint64 = 1000 err := state.UpsertScalingPolicies(baseIndex, []*structs.ScalingPolicy{policyA, policyB1, policyB2}) - require.NoError(err) + must.NoError(t, err) iter, err := state.ScalingPoliciesByJob(nil, policyA.Target[structs.ScalingTargetNamespace], policyA.Target[structs.ScalingTargetJob], "") - require.NoError(err) + must.NoError(t, err) 
// Ensure we see expected policies count := 0 @@ -10475,16 +9396,16 @@ func TestStateStore_ScalingPoliciesByJob(t *testing.T) { count++ found = append(found, raw.(*structs.ScalingPolicy).Target[structs.ScalingTargetGroup]) } - require.Equal(1, count) + must.Eq(t, 1, count) sort.Strings(found) expect := []string{policyA.Target[structs.ScalingTargetGroup]} sort.Strings(expect) - require.Equal(expect, found) + must.Eq(t, expect, found) iter, err = state.ScalingPoliciesByJob(nil, policyB1.Target[structs.ScalingTargetNamespace], policyB1.Target[structs.ScalingTargetJob], "") - require.NoError(err) + must.NoError(t, err) // Ensure we see expected policies count = 0 @@ -10497,21 +9418,19 @@ func TestStateStore_ScalingPoliciesByJob(t *testing.T) { count++ found = append(found, raw.(*structs.ScalingPolicy).Target[structs.ScalingTargetGroup]) } - require.Equal(2, count) + must.Eq(t, 2, count) sort.Strings(found) expect = []string{ policyB1.Target[structs.ScalingTargetGroup], policyB2.Target[structs.ScalingTargetGroup], } sort.Strings(expect) - require.Equal(expect, found) + must.Eq(t, expect, found) } func TestStateStore_ScalingPoliciesByJob_PrefixBug(t *testing.T) { ci.Parallel(t) - require := require.New(t) - jobPrefix := "job-name-" + uuid.Generate() state := testStateStore(t) @@ -10523,12 +9442,12 @@ func TestStateStore_ScalingPoliciesByJob_PrefixBug(t *testing.T) { // Create the policies var baseIndex uint64 = 1000 err := state.UpsertScalingPolicies(baseIndex, []*structs.ScalingPolicy{policy1, policy2}) - require.NoError(err) + must.NoError(t, err) iter, err := state.ScalingPoliciesByJob(nil, policy1.Target[structs.ScalingTargetNamespace], jobPrefix, "") - require.NoError(err) + must.NoError(t, err) // Ensure we see expected policies count := 0 @@ -10541,16 +9460,14 @@ func TestStateStore_ScalingPoliciesByJob_PrefixBug(t *testing.T) { count++ found = append(found, raw.(*structs.ScalingPolicy).ID) } - require.Equal(1, count) + must.Eq(t, 1, count) expect := 
[]string{policy1.ID} - require.Equal(expect, found) + must.Eq(t, expect, found) } func TestStateStore_ScalingPolicyByTargetAndType(t *testing.T) { ci.Parallel(t) - require := require.New(t) - state := testStateStore(t) // Create scaling policies @@ -10566,31 +9483,30 @@ func TestStateStore_ScalingPolicyByTargetAndType(t *testing.T) { // Create the policies var baseIndex uint64 = 1000 err := state.UpsertScalingPolicies(baseIndex, []*structs.ScalingPolicy{policyA, policyB, policyC}) - require.NoError(err) + must.NoError(t, err) // Check if we can retrieve the right policies found, err := state.ScalingPolicyByTargetAndType(nil, policyA.Target, policyA.Type) - require.NoError(err) - require.Equal(policyA, found) + must.NoError(t, err) + must.Eq(t, policyA, found) // Check for wrong type found, err = state.ScalingPolicyByTargetAndType(nil, policyA.Target, "wrong_type") - require.NoError(err) - require.Nil(found) + must.NoError(t, err) + must.Nil(t, found) // Check for same target but different type found, err = state.ScalingPolicyByTargetAndType(nil, policyB.Target, policyB.Type) - require.NoError(err) - require.Equal(policyB, found) + must.NoError(t, err) + must.Eq(t, policyB, found) found, err = state.ScalingPolicyByTargetAndType(nil, policyB.Target, policyC.Type) - require.NoError(err) - require.Equal(policyC, found) + must.NoError(t, err) + must.Eq(t, policyC, found) } func TestStateStore_UpsertScalingEvent(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) job := mock.Job() @@ -10603,13 +9519,13 @@ func TestStateStore_UpsertScalingEvent(t *testing.T) { wsAll := memdb.NewWatchSet() all, err := state.ScalingEvents(wsAll) - require.NoError(err) - require.Nil(all.Next()) + must.NoError(t, err) + must.Nil(t, all.Next()) ws := memdb.NewWatchSet() out, _, err := state.ScalingEventsByJob(ws, job.Namespace, job.ID) - require.NoError(err) - require.Nil(out) + must.NoError(t, err) + must.Nil(t, out) err = state.UpsertScalingEvent(1000, 
&structs.ScalingEventRequest{ Namespace: job.Namespace, @@ -10617,20 +9533,20 @@ func TestStateStore_UpsertScalingEvent(t *testing.T) { TaskGroup: groupName, ScalingEvent: newEvent, }) - require.NoError(err) - require.True(watchFired(ws)) - require.True(watchFired(wsAll)) + must.NoError(t, err) + must.True(t, watchFired(ws)) + must.True(t, watchFired(wsAll)) ws = memdb.NewWatchSet() out, eventsIndex, err := state.ScalingEventsByJob(ws, job.Namespace, job.ID) - require.NoError(err) - require.Equal(map[string][]*structs.ScalingEvent{ + must.NoError(t, err) + must.Eq(t, map[string][]*structs.ScalingEvent{ groupName: {newEvent}, }, out) - require.EqualValues(eventsIndex, 1000) + must.Eq(t, eventsIndex, 1000) iter, err := state.ScalingEvents(ws) - require.NoError(err) + must.NoError(t, err) count := 0 jobsReturned := []string{} @@ -10644,23 +9560,22 @@ func TestStateStore_UpsertScalingEvent(t *testing.T) { jobsReturned = append(jobsReturned, jobEvents.JobID) count++ } - require.Equal(1, count) - require.EqualValues(jobEvents.ModifyIndex, 1000) - require.EqualValues(jobEvents.ScalingEvents[groupName][0].CreateIndex, 1000) + must.Eq(t, 1, count) + must.Eq(t, jobEvents.ModifyIndex, 1000) + must.Eq(t, jobEvents.ScalingEvents[groupName][0].CreateIndex, 1000) index, err := state.Index("scaling_event") - require.NoError(err) - require.ElementsMatch([]string{job.ID}, jobsReturned) - require.Equal(map[string][]*structs.ScalingEvent{ + must.NoError(t, err) + must.SliceContainsAll(t, []string{job.ID}, jobsReturned) + must.Eq(t, map[string][]*structs.ScalingEvent{ groupName: {newEvent}, }, jobEvents.ScalingEvents) - require.EqualValues(1000, index) - require.False(watchFired(ws)) + must.Eq(t, 1000, index) + must.False(t, watchFired(ws)) } func TestStateStore_UpsertScalingEvent_LimitAndOrder(t *testing.T) { ci.Parallel(t) - require := require.New(t) state := testStateStore(t) namespace := uuid.Generate() @@ -10682,7 +9597,7 @@ func TestStateStore_UpsertScalingEvent_LimitAndOrder(t 
*testing.T) { ScalingEvent: newEvent, }) index++ - require.NoError(err) + must.NoError(t, err) newEvent = structs.NewScalingEvent("") newEvent.Meta = map[string]interface{}{ @@ -10696,12 +9611,12 @@ func TestStateStore_UpsertScalingEvent_LimitAndOrder(t *testing.T) { ScalingEvent: newEvent, }) index++ - require.NoError(err) + must.NoError(t, err) } out, _, err := state.ScalingEventsByJob(nil, namespace, jobID) - require.NoError(err) - require.Len(out, 2) + must.NoError(t, err) + must.MapLen(t, 2, out) expectedEvents := []int{} for i := structs.JobTrackedScalingEvents; i > 0; i-- { @@ -10709,22 +9624,22 @@ func TestStateStore_UpsertScalingEvent_LimitAndOrder(t *testing.T) { } // checking order and content - require.Len(out[group1], structs.JobTrackedScalingEvents) + must.Len(t, structs.JobTrackedScalingEvents, out[group1]) actualEvents := []int{} for _, event := range out[group1] { - require.Equal(group1, event.Meta["group"]) + must.Eq(t, group1, event.Meta["group"].(string)) actualEvents = append(actualEvents, event.Meta["i"].(int)) } - require.Equal(expectedEvents, actualEvents) + must.Eq(t, expectedEvents, actualEvents) // checking order and content - require.Len(out[group2], structs.JobTrackedScalingEvents) + must.Len(t, structs.JobTrackedScalingEvents, out[group2]) actualEvents = []int{} for _, event := range out[group2] { - require.Equal(group2, event.Meta["group"]) + must.Eq(t, group2, event.Meta["group"].(string)) actualEvents = append(actualEvents, event.Meta["i"].(int)) } - require.Equal(expectedEvents, actualEvents) + must.Eq(t, expectedEvents, actualEvents) } func TestStateStore_Abandon(t *testing.T) { @@ -10746,11 +9661,10 @@ func TestStateSnapshot_DenormalizeAllocationDiffSlice_AllocDoesNotExist(t *testi state := testStateStore(t) alloc := mock.Alloc() - require := require.New(t) // Insert job err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job) - require.NoError(err) + must.NoError(t, err) allocDiffs := []*structs.AllocationDiff{ { @@ 
-10759,12 +9673,12 @@ func TestStateSnapshot_DenormalizeAllocationDiffSlice_AllocDoesNotExist(t *testi } snap, err := state.Snapshot() - require.NoError(err) + must.NoError(t, err) denormalizedAllocs, err := snap.DenormalizeAllocationDiffSlice(allocDiffs) - require.EqualError(err, fmt.Sprintf("alloc %v doesn't exist", alloc.ID)) - require.Nil(denormalizedAllocs) + must.EqError(t, err, fmt.Sprintf("alloc %v doesn't exist", alloc.ID)) + must.Nil(t, denormalizedAllocs) } // TestStateStore_SnapshotMinIndex_OK asserts StateStore.SnapshotMinIndex blocks @@ -10774,32 +9688,32 @@ func TestStateStore_SnapshotMinIndex_OK(t *testing.T) { s := testStateStore(t) index, err := s.LatestIndex() - require.NoError(t, err) + must.NoError(t, err) node := mock.Node() - require.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, index+1, node)) + must.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, index+1, node)) // Assert SnapshotMinIndex returns immediately if index < latest index ctx, cancel := context.WithTimeout(context.Background(), 0) snap, err := s.SnapshotMinIndex(ctx, index) cancel() - require.NoError(t, err) + must.NoError(t, err) snapIndex, err := snap.LatestIndex() - require.NoError(t, err) + must.NoError(t, err) if snapIndex <= index { - require.Fail(t, "snapshot index should be greater than index") + t.Fatal("snapshot index should be greater than index") } // Assert SnapshotMinIndex returns immediately if index == latest index ctx, cancel = context.WithTimeout(context.Background(), 0) snap, err = s.SnapshotMinIndex(ctx, index+1) cancel() - require.NoError(t, err) + must.NoError(t, err) snapIndex, err = snap.LatestIndex() - require.NoError(t, err) - require.Equal(t, snapIndex, index+1) + must.NoError(t, err) + must.Eq(t, snapIndex, index+1) // Assert SnapshotMinIndex blocks if index > latest index errCh := make(chan error, 1) @@ -10828,19 +9742,19 @@ func TestStateStore_SnapshotMinIndex_OK(t *testing.T) { select { case err := <-errCh: - require.NoError(t, err) + 
must.NoError(t, err) case <-time.After(500 * time.Millisecond): // Let it block for a bit before unblocking by upserting } node.Name = "hal" - require.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, index+2, node)) + must.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, index+2, node)) select { case err := <-errCh: - require.NoError(t, err) + must.NoError(t, err) case <-time.After(5 * time.Second): - require.Fail(t, "timed out waiting for SnapshotMinIndex to unblock") + t.Fatal("timed out waiting for SnapshotMinIndex to unblock") } } @@ -10851,14 +9765,14 @@ func TestStateStore_SnapshotMinIndex_Timeout(t *testing.T) { s := testStateStore(t) index, err := s.LatestIndex() - require.NoError(t, err) + must.NoError(t, err) // Assert SnapshotMinIndex blocks if index > latest index ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() snap, err := s.SnapshotMinIndex(ctx, index+1) - require.EqualError(t, err, context.DeadlineExceeded.Error()) - require.Nil(t, snap) + must.EqError(t, err, context.DeadlineExceeded.Error()) + must.Nil(t, snap) } // watchFired is a helper for unit tests that returns if the given watch set