api: purge testify and pretty dependencies (#15627)

* api: swap testify for test (acl)

* api: swap testify for test (agent)

* api: swap testify for test (allocations)

* api: swap testify for test (api)

* api: swap testify for test (compose)

* api: swap testify for test (constraint)

* api: swap testify for test (consul)

* api: swap testify for test (csi)

* api: swap testify for test (evaluations)

* api: swap testify for test (event stream)

* api: swap testify for test (fs)

* api: swap testify for test (ioutil)

* api: swap testify for test (jobs)

* api: swap testify for test (keyring)

* api: swap testify for test (operator_ent)

* api: swap testify for test (operator_metrics)

* api: swap testify for test (operator)

* api: swap testify for test (quota)

* api: swap testify for test (resources)

* api: swap testify for test (fix operator_metrics)

* api: swap testify for test (scaling)

* api: swap testify for test (search)

* api: swap testify for test (sentinel)

* api: swap testify for test (services)

* api: swap testify for test (status)

* api: swap testify for test (system)

* api: swap testify for test (tasks)

* api: swap testify for test (utils)

* api: swap testify for test (variables)

* api: remove dependencies on testify and pretty

Author: Seth Hoenig (committed by GitHub)
Date:   2023-01-01 12:57:26 -06:00
Parent: f452441542
Commit: 9bfd89ccd3

30 changed files with 1139 additions and 1490 deletions
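
The change itself is mechanical across all 30 files: testify's require and assert calls are swapped for their counterparts in github.com/shoenig/test/must, which likewise abort the test on failure. A minimal sketch of the mapping, using a hypothetical TestMigrationExample that is not part of this commit:

package api

import (
	"testing"

	"github.com/shoenig/test/must"
)

// TestMigrationExample is illustrative only; each must call maps to the
// testify call noted in the trailing comment.
func TestMigrationExample(t *testing.T) {
	items := []string{"a"}
	var err error

	must.NoError(t, err)      // was: require.NoError(t, err) or assert.Nil(t, err)
	must.Eq(t, "a", items[0]) // was: require.Equal(t, "a", items[0])
	must.Len(t, 1, items)     // was: require.Len(t, items, 1); the expected length now comes first
}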

@@ -6,27 +6,20 @@ import (
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestACLPolicies_ListUpsert(t *testing.T) {
testutil.Parallel(t)
c, s, _ := makeACLClient(t, nil, nil)
defer s.Stop()
ap := c.ACLPolicies()
// Listing when nothing exists returns empty
result, qm, err := ap.List(nil)
if err != nil {
t.Fatalf("err: %s", err)
}
if qm.LastIndex != 1 {
t.Fatalf("bad index: %d", qm.LastIndex)
}
if n := len(result); n != 0 {
t.Fatalf("expected 0 policies, got: %d", n)
}
must.NoError(t, err)
must.One(t, qm.LastIndex)
must.Len(t, 0, result)
// Register a policy
policy := &ACLPolicy{
@@ -38,22 +31,20 @@ func TestACLPolicies_ListUpsert(t *testing.T) {
`,
}
wm, err := ap.Upsert(policy, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Check the list again
result, qm, err = ap.List(nil)
if err != nil {
t.Fatalf("err: %s", err)
}
must.NoError(t, err)
assertQueryMeta(t, qm)
if len(result) != 1 {
t.Fatalf("expected policy, got: %#v", result)
}
must.Len(t, 1, result)
}
func TestACLPolicies_Delete(t *testing.T) {
testutil.Parallel(t)
c, s, _ := makeACLClient(t, nil, nil)
defer s.Stop()
ap := c.ACLPolicies()
@@ -68,27 +59,25 @@ func TestACLPolicies_Delete(t *testing.T) {
`,
}
wm, err := ap.Upsert(policy, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Delete the policy
wm, err = ap.Delete(policy.Name, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Check the list again
result, qm, err := ap.List(nil)
if err != nil {
t.Fatalf("err: %s", err)
}
must.NoError(t, err)
assertQueryMeta(t, qm)
if len(result) != 0 {
t.Fatalf("unexpected policy, got: %#v", result)
}
must.Len(t, 0, result)
}
func TestACLPolicies_Info(t *testing.T) {
testutil.Parallel(t)
c, s, _ := makeACLClient(t, nil, nil)
defer s.Stop()
ap := c.ACLPolicies()
@@ -103,28 +92,29 @@ func TestACLPolicies_Info(t *testing.T) {
`,
}
wm, err := ap.Upsert(policy, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Query the policy
out, qm, err := ap.Info(policy.Name, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertQueryMeta(t, qm)
assert.Equal(t, policy.Name, out.Name)
must.Eq(t, policy.Name, out.Name)
}
func TestACLTokens_List(t *testing.T) {
testutil.Parallel(t)
c, s, _ := makeACLClient(t, nil, nil)
defer s.Stop()
at := c.ACLTokens()
// Expect the bootstrap token.
result, qm, err := at.List(nil)
require.NoError(t, err)
require.NotEqual(t, 0, qm.LastIndex)
require.Len(t, result, 1)
require.Nil(t, result[0].ExpirationTime)
must.NoError(t, err)
must.NonZero(t, qm.LastIndex)
must.Len(t, 1, result)
must.Nil(t, result[0].ExpirationTime)
// Create a token with an expiry.
token := &ACLToken{
@@ -134,24 +124,25 @@ func TestACLTokens_List(t *testing.T) {
ExpirationTTL: 1 * time.Hour,
}
createExpirationResp, _, err := at.Create(token, nil)
require.Nil(t, err)
must.NoError(t, err)
// Perform the listing again and ensure we have two entries along with the
// expiration correctly set and available.
listResp, qm, err := at.List(nil)
require.Nil(t, err)
must.Nil(t, err)
assertQueryMeta(t, qm)
require.Len(t, listResp, 2)
must.Len(t, 2, listResp)
for _, tokenStub := range listResp {
if tokenStub.AccessorID == createExpirationResp.AccessorID {
require.NotNil(t, tokenStub.ExpirationTime)
must.NotNil(t, tokenStub.ExpirationTime)
}
}
}
func TestACLTokens_CreateUpdate(t *testing.T) {
testutil.Parallel(t)
c, s, _ := makeACLClient(t, nil, nil)
defer s.Stop()
at := c.ACLTokens()
@@ -164,25 +155,25 @@ func TestACLTokens_CreateUpdate(t *testing.T) {
// Create the token
out, wm, err := at.Create(token, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
assert.NotNil(t, out)
must.NotNil(t, out)
// Update the token
out.Name = "other"
out2, wm, err := at.Update(out, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
assert.NotNil(t, out2)
must.NotNil(t, out2)
// Verify the change took hold
assert.Equal(t, out.Name, out2.Name)
must.Eq(t, out.Name, out2.Name)
// Try updating the token to include a TTL which is not allowed.
out2.ExpirationTTL = 10 * time.Minute
out3, _, err := at.Update(out2, nil)
require.Error(t, err)
require.Nil(t, out3)
must.Error(t, err)
must.Nil(t, out3)
// Try adding a role link to our token, which should be possible. For this
// we need to create a policy and link to this from a role.
@@ -191,7 +182,7 @@ func TestACLTokens_CreateUpdate(t *testing.T) {
Rules: `namespace "default" { policy = "read" }`,
}
writeMeta, err := c.ACLPolicies().Upsert(&aclPolicy, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, writeMeta)
// Create an ACL role referencing the previously created
@@ -201,21 +192,21 @@ func TestACLTokens_CreateUpdate(t *testing.T) {
Policies: []*ACLRolePolicyLink{{Name: aclPolicy.Name}},
}
aclRoleCreateResp, writeMeta, err := c.ACLRoles().Create(&role, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, writeMeta)
require.NotEmpty(t, aclRoleCreateResp.ID)
require.Equal(t, role.Name, aclRoleCreateResp.Name)
must.UUIDv4(t, aclRoleCreateResp.ID)
must.Eq(t, role.Name, aclRoleCreateResp.Name)
out2.Roles = []*ACLTokenRoleLink{{Name: aclRoleCreateResp.Name}}
out2.ExpirationTTL = 0
out3, _, err = at.Update(out2, nil)
require.NoError(t, err)
require.NotNil(t, out3)
require.Len(t, out3.Policies, 1)
require.Equal(t, out3.Policies[0], "foo1")
require.Len(t, out3.Roles, 1)
require.Equal(t, out3.Roles[0].Name, role.Name)
must.NoError(t, err)
must.NotNil(t, out3)
must.Len(t, 1, out3.Policies)
must.Eq(t, "foo1", out3.Policies[0])
must.Len(t, 1, out3.Roles)
must.Eq(t, role.Name, out3.Roles[0].Name)
}
func TestACLTokens_Info(t *testing.T) {
@@ -240,15 +231,15 @@ func TestACLTokens_Info(t *testing.T) {
// Create the token
out, wm, err := client.ACLTokens().Create(token, nil)
require.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
require.NotNil(t, out)
must.NotNil(t, out)
// Query the token
out2, qm, err := client.ACLTokens().Info(out.AccessorID, nil)
require.Nil(t, err)
must.NoError(t, err)
assertQueryMeta(t, qm)
require.Equal(t, out, out2)
must.Eq(t, out, out2)
},
},
{
@@ -264,18 +255,18 @@ func TestACLTokens_Info(t *testing.T) {
// Create the token
out, wm, err := client.ACLTokens().Create(token, nil)
require.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
require.NotNil(t, out)
must.NotNil(t, out)
// Query the token and ensure it matches what was returned
// during the creation as well as ensuring the expiration time
// is set.
out2, qm, err := client.ACLTokens().Info(out.AccessorID, nil)
require.Nil(t, err)
must.NoError(t, err)
assertQueryMeta(t, qm)
require.Equal(t, out, out2)
require.NotNil(t, out2.ExpirationTime)
must.Eq(t, out, out2)
must.NotNil(t, out2.ExpirationTime)
},
},
{
@@ -289,7 +280,7 @@ func TestACLTokens_Info(t *testing.T) {
Rules: `namespace "default" { policy = "read" }`,
}
writeMeta, err := testClient.ACLPolicies().Upsert(&aclPolicy, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, writeMeta)
// Create an ACL role referencing the previously created
@@ -299,10 +290,10 @@ func TestACLTokens_Info(t *testing.T) {
Policies: []*ACLRolePolicyLink{{Name: aclPolicy.Name}},
}
aclRoleCreateResp, writeMeta, err := testClient.ACLRoles().Create(&role, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, writeMeta)
require.NotEmpty(t, aclRoleCreateResp.ID)
require.Equal(t, role.Name, aclRoleCreateResp.Name)
must.UUIDv4(t, aclRoleCreateResp.ID)
must.Eq(t, role.Name, aclRoleCreateResp.Name)
// Create a token with a role linking.
token := &ACLToken{
@@ -312,18 +303,18 @@ func TestACLTokens_Info(t *testing.T) {
}
out, wm, err := client.ACLTokens().Create(token, nil)
require.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
require.NotNil(t, out)
must.NotNil(t, out)
// Query the token and ensure it matches what was returned
// during the creation.
out2, qm, err := client.ACLTokens().Info(out.AccessorID, nil)
require.Nil(t, err)
must.NoError(t, err)
assertQueryMeta(t, qm)
require.Equal(t, out, out2)
require.Len(t, out.Roles, 1)
require.Equal(t, out.Roles[0].Name, aclPolicy.Name)
must.Eq(t, out, out2)
must.Len(t, 1, out.Roles)
must.Eq(t, out.Roles[0].Name, aclPolicy.Name)
},
},
@@ -338,7 +329,7 @@ func TestACLTokens_Info(t *testing.T) {
Rules: `namespace "default" { policy = "read" }`,
}
writeMeta, err := testClient.ACLPolicies().Upsert(&aclPolicy1, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, writeMeta)
// Create another that can be referenced within the ACL token
@@ -348,7 +339,7 @@ func TestACLTokens_Info(t *testing.T) {
Rules: `namespace "fawlty" { policy = "read" }`,
}
writeMeta, err = testClient.ACLPolicies().Upsert(&aclPolicy2, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, writeMeta)
// Create an ACL role referencing the previously created
@@ -358,10 +349,10 @@ func TestACLTokens_Info(t *testing.T) {
Policies: []*ACLRolePolicyLink{{Name: aclPolicy1.Name}},
}
aclRoleCreateResp, writeMeta, err := testClient.ACLRoles().Create(&role, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, writeMeta)
require.NotEmpty(t, aclRoleCreateResp.ID)
require.Equal(t, role.Name, aclRoleCreateResp.Name)
must.NotEq(t, "", aclRoleCreateResp.ID)
must.Eq(t, role.Name, aclRoleCreateResp.Name)
// Create a token with a role linking.
token := &ACLToken{
@@ -372,20 +363,20 @@ func TestACLTokens_Info(t *testing.T) {
}
out, wm, err := client.ACLTokens().Create(token, nil)
require.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
require.NotNil(t, out)
require.Len(t, out.Policies, 1)
require.Equal(t, out.Policies[0], aclPolicy2.Name)
require.Len(t, out.Roles, 1)
require.Equal(t, out.Roles[0].Name, role.Name)
must.NotNil(t, out)
must.Len(t, 1, out.Policies)
must.Eq(t, out.Policies[0], aclPolicy2.Name)
must.Len(t, 1, out.Roles)
must.Eq(t, out.Roles[0].Name, role.Name)
// Query the token and ensure it matches what was returned
// during the creation.
out2, qm, err := client.ACLTokens().Info(out.AccessorID, nil)
require.Nil(t, err)
must.NoError(t, err)
assertQueryMeta(t, qm)
require.Equal(t, out, out2)
must.Eq(t, out, out2)
},
},
}
@@ -399,6 +390,7 @@ func TestACLTokens_Info(t *testing.T) {
func TestACLTokens_Self(t *testing.T) {
testutil.Parallel(t)
c, s, _ := makeACLClient(t, nil, nil)
defer s.Stop()
at := c.ACLTokens()
@@ -411,9 +403,9 @@ func TestACLTokens_Self(t *testing.T) {
// Create the token
out, wm, err := at.Create(token, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
assert.NotNil(t, out)
must.NotNil(t, out)
// Set the client's token to the new token
c.SetSecretID(out.SecretID)
@@ -421,14 +413,14 @@ func TestACLTokens_Self(t *testing.T) {
// Query the token
out2, qm, err := at.Self(nil)
if assert.Nil(t, err) {
assertQueryMeta(t, qm)
assert.Equal(t, out, out2)
}
must.NoError(t, err)
assertQueryMeta(t, qm)
must.Eq(t, out, out2)
}
func TestACLTokens_Delete(t *testing.T) {
testutil.Parallel(t)
c, s, _ := makeACLClient(t, nil, nil)
defer s.Stop()
at := c.ACLTokens()
@@ -441,18 +433,19 @@ func TestACLTokens_Delete(t *testing.T) {
// Create the token
out, wm, err := at.Create(token, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
assert.NotNil(t, out)
must.NotNil(t, out)
// Delete the token
wm, err = at.Delete(out.AccessorID, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
}
func TestACL_OneTimeToken(t *testing.T) {
testutil.Parallel(t)
c, s, _ := makeACLClient(t, nil, nil)
defer s.Stop()
at := c.ACLTokens()
@@ -465,27 +458,28 @@ func TestACL_OneTimeToken(t *testing.T) {
// Create the ACL token
out, wm, err := at.Create(token, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
assert.NotNil(t, out)
must.NotNil(t, out)
// Get a one-time token
c.SetSecretID(out.SecretID)
out2, wm, err := at.UpsertOneTimeToken(nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
assert.NotNil(t, out2)
must.NotNil(t, out2)
// Exchange the one-time token
out3, wm, err := at.ExchangeOneTimeToken(out2.OneTimeSecretID, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
assert.NotNil(t, out3)
assert.Equal(t, out3.AccessorID, out.AccessorID)
must.NotNil(t, out3)
must.Eq(t, out.AccessorID, out3.AccessorID)
}
func TestACLTokens_BootstrapInvalidToken(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
c.ACL.Enabled = true
})
@@ -495,7 +489,7 @@ func TestACLTokens_BootstrapInvalidToken(t *testing.T) {
bootkn := "badtoken"
// Bootstrap with invalid token
_, _, err := at.BootstrapOpts(bootkn, nil)
assert.EqualError(t, err, "Unexpected response code: 400 (invalid acl token)")
must.EqError(t, err, "Unexpected response code: 400 (invalid acl token)")
}
func TestACLTokens_BootstrapValidToken(t *testing.T) {
@@ -509,9 +503,9 @@ func TestACLTokens_BootstrapValidToken(t *testing.T) {
bootkn := "2b778dd9-f5f1-6f29-b4b4-9a5fa948757a"
// Bootstrap with Valid token
out, wm, err := at.BootstrapOpts(bootkn, nil)
assert.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
assert.Equal(t, bootkn, out.SecretID)
must.Eq(t, bootkn, out.SecretID)
}
func TestACLRoles(t *testing.T) {
@@ -522,8 +516,8 @@ func TestACLRoles(t *testing.T) {
// An initial listing shouldn't return any results.
aclRoleListResp, queryMeta, err := testClient.ACLRoles().List(nil)
require.NoError(t, err)
require.Empty(t, aclRoleListResp)
must.NoError(t, err)
must.SliceEmpty(t, aclRoleListResp)
assertQueryMeta(t, queryMeta)
// Create an ACL policy that can be referenced within the ACL role.
@@ -535,7 +529,7 @@ func TestACLRoles(t *testing.T) {
`,
}
writeMeta, err := testClient.ACLPolicies().Upsert(&aclPolicy, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, writeMeta)
// Create an ACL role referencing the previously created policy.
@@ -544,47 +538,47 @@ func TestACLRoles(t *testing.T) {
Policies: []*ACLRolePolicyLink{{Name: aclPolicy.Name}},
}
aclRoleCreateResp, writeMeta, err := testClient.ACLRoles().Create(&role, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, writeMeta)
require.NotEmpty(t, aclRoleCreateResp.ID)
require.Equal(t, role.Name, aclRoleCreateResp.Name)
must.UUIDv4(t, aclRoleCreateResp.ID)
must.Eq(t, role.Name, aclRoleCreateResp.Name)
// Another listing should return one result.
aclRoleListResp, queryMeta, err = testClient.ACLRoles().List(nil)
require.NoError(t, err)
require.Len(t, aclRoleListResp, 1)
must.NoError(t, err)
must.Len(t, 1, aclRoleListResp)
assertQueryMeta(t, queryMeta)
// Read the role using its ID.
aclRoleReadResp, queryMeta, err := testClient.ACLRoles().Get(aclRoleCreateResp.ID, nil)
require.NoError(t, err)
must.NoError(t, err)
assertQueryMeta(t, queryMeta)
require.Equal(t, aclRoleCreateResp, aclRoleReadResp)
must.Eq(t, aclRoleCreateResp, aclRoleReadResp)
// Read the role using its name.
aclRoleReadResp, queryMeta, err = testClient.ACLRoles().GetByName(aclRoleCreateResp.Name, nil)
require.NoError(t, err)
must.NoError(t, err)
assertQueryMeta(t, queryMeta)
require.Equal(t, aclRoleCreateResp, aclRoleReadResp)
must.Eq(t, aclRoleCreateResp, aclRoleReadResp)
// Update the role name.
role.Name = "acl-role-api-test-badger-badger-badger"
role.ID = aclRoleCreateResp.ID
aclRoleUpdateResp, writeMeta, err := testClient.ACLRoles().Update(&role, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, writeMeta)
require.Equal(t, role.Name, aclRoleUpdateResp.Name)
require.Equal(t, role.ID, aclRoleUpdateResp.ID)
must.Eq(t, role.Name, aclRoleUpdateResp.Name)
must.Eq(t, role.ID, aclRoleUpdateResp.ID)
// Delete the role.
writeMeta, err = testClient.ACLRoles().Delete(aclRoleCreateResp.ID, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, writeMeta)
// Make sure there are no ACL roles now present.
aclRoleListResp, queryMeta, err = testClient.ACLRoles().List(nil)
require.NoError(t, err)
require.Empty(t, aclRoleListResp)
must.NoError(t, err)
must.SliceEmpty(t, aclRoleListResp)
assertQueryMeta(t, queryMeta)
}
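
A convention visible throughout the ACL hunks above: must takes the expected value before the value under test, so require.Len(t, result, 1) becomes must.Len(t, 1, result). Checks that an ID is merely non-empty are also tightened to must.UUIDv4. A hypothetical sketch (TestArgumentOrder is not part of this commit):

package api

import (
	"testing"

	"github.com/shoenig/test/must"
)

// TestArgumentOrder is illustrative only.
func TestArgumentOrder(t *testing.T) {
	policies := []string{"foo1"}

	must.Len(t, 1, policies)        // testify: require.Len(t, policies, 1)
	must.Eq(t, "foo1", policies[0]) // testify: require.Equal(t, "foo1", policies[0])

	// Where testify only asserted an ID was non-empty, the new tests assert
	// the stronger property that it is a well-formed v4 UUID.
	id := "8e231cf4-ca48-43ff-b694-5801e69e22fa"
	must.UUIDv4(t, id) // testify: require.NotEmpty(t, id)
}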

@@ -1,9 +1,6 @@
package api
import (
"fmt"
"net/http"
"reflect"
"sort"
"strings"
"testing"
@@ -11,12 +8,11 @@ import (
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAgent_Self(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
@@ -25,55 +21,46 @@ func TestAgent_Self(t *testing.T) {
// Query the endpoint
res, err := a.Self()
if err != nil {
t.Fatalf("err: %s", err)
}
must.NoError(t, err)
// Check that we got a valid response
if res.Member.Name == "" {
t.Fatalf("bad member name in response: %#v", res)
}
must.NotEq(t, "", res.Member.Name, must.Sprint("missing member name"))
// Local cache was populated
if a.nodeName == "" || a.datacenter == "" || a.region == "" {
t.Fatalf("cache should be populated, got: %#v", a)
}
must.NotEq(t, "", a.nodeName, must.Sprint("cache should be populated"))
must.NotEq(t, "", a.datacenter, must.Sprint("cache should be populated"))
must.NotEq(t, "", a.region, must.Sprint("cache should be populated"))
}
func TestAgent_NodeName(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
a := c.Agent()
// Query the agent for the node name
res, err := a.NodeName()
if err != nil {
t.Fatalf("err: %s", err)
}
if res == "" {
t.Fatalf("expected node name, got nothing")
}
nodeName, err := a.NodeName()
must.NoError(t, err)
must.NotEq(t, "", nodeName)
}
func TestAgent_Datacenter(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
a := c.Agent()
// Query the agent for the datacenter
dc, err := a.Datacenter()
if err != nil {
t.Fatalf("err: %s", err)
}
if dc != "dc1" {
t.Fatalf("expected dc1, got: %q", dc)
}
must.NoError(t, err)
must.Eq(t, "dc1", dc)
}
func TestAgent_Join(t *testing.T) {
testutil.Parallel(t)
c1, s1 := makeClient(t, nil, nil)
defer s1.Stop()
a1 := c1.Agent()
@@ -85,54 +72,43 @@ func TestAgent_Join(t *testing.T) {
// Attempting to join a nonexistent host returns error
n, err := a1.Join("nope")
if err == nil {
t.Fatalf("expected error, got nothing")
}
if n != 0 {
t.Fatalf("expected 0 nodes, got: %d", n)
}
must.Error(t, err)
must.Zero(t, n, must.Sprint("expected zero nodes joined"))
// Returns correctly if join succeeds
n, err = a1.Join(s2.SerfAddr)
if err != nil {
t.Fatalf("err: %s", err)
}
if n != 1 {
t.Fatalf("expected 1 node, got: %d", n)
}
must.NoError(t, err)
must.One(t, n)
}
func TestAgent_Members(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
a := c.Agent()
// Query nomad for all the known members
mem, err := a.Members()
if err != nil {
t.Fatalf("err: %s", err)
}
must.NoError(t, err)
// Check that we got the expected result
if n := len(mem.Members); n != 1 {
t.Fatalf("expected 1 member, got: %d", n)
}
if m := mem.Members[0]; m.Name == "" || m.Addr == "" || m.Port == 0 {
t.Fatalf("bad member: %#v", m)
}
must.Len(t, 1, mem.Members)
must.NotEq(t, "", mem.Members[0].Name)
must.NotEq(t, "", mem.Members[0].Addr)
must.NotEq(t, 0, mem.Members[0].Port)
}
func TestAgent_ForceLeave(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
a := c.Agent()
// Force-leave on a nonexistent node does not error
if err := a.ForceLeave("nope"); err != nil {
t.Fatalf("err: %s", err)
}
err := a.ForceLeave("nope")
must.NoError(t, err)
// TODO: test force-leave on an existing node
}
@@ -143,6 +119,7 @@ func (a *AgentMember) String() string {
func TestAgents_Sort(t *testing.T) {
testutil.Parallel(t)
var sortTests = []struct {
in []*AgentMember
out []*AgentMember
@@ -246,22 +223,20 @@ func TestAgents_Sort(t *testing.T) {
}
for _, tt := range sortTests {
sort.Sort(AgentMembersNameSort(tt.in))
if !reflect.DeepEqual(tt.in, tt.out) {
t.Errorf("\nexpected: %s\nget : %s", tt.in, tt.out)
}
must.Eq(t, tt.in, tt.out)
}
}
func TestAgent_Health(t *testing.T) {
testutil.Parallel(t)
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
a := c.Agent()
health, err := a.Health()
assert.Nil(err)
assert.True(health.Server.Ok)
must.NoError(t, err)
must.True(t, health.Server.Ok)
}
// TestAgent_MonitorWithNode tests the Monitor endpoint
@@ -314,6 +289,7 @@ OUTER:
// monitor functionality
func TestAgent_Monitor(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
@@ -331,7 +307,7 @@ func TestAgent_Monitor(t *testing.T) {
// make a request to generate some logs
_, err := agent.Region()
require.NoError(t, err)
must.NoError(t, err)
// Wait for a log message
OUTER:
@@ -347,7 +323,7 @@ OUTER:
case err := <-errCh:
t.Fatalf("error: %v", err)
case <-time.After(2 * time.Second):
require.Fail(t, "failed to get a DEBUG log message")
must.Unreachable(t, must.Sprint("failed to get DEBUG log message"))
}
}
}
@@ -370,8 +346,8 @@ func TestAgentCPUProfile(t *testing.T) {
Seconds: 1,
}
resp, err := agent.CPUProfile(opts, q)
require.NoError(t, err)
require.NotNil(t, resp)
must.NoError(t, err)
must.NotNil(t, resp)
}
// Invalid server request
@@ -381,9 +357,9 @@ func TestAgentCPUProfile(t *testing.T) {
ServerID: "unknown.global",
}
resp, err := agent.CPUProfile(opts, q)
require.Error(t, err)
require.Contains(t, err.Error(), "500 (unknown Nomad server unknown.global)")
require.Nil(t, resp)
must.Error(t, err)
must.ErrorContains(t, err, "500 (unknown Nomad server unknown.global)")
must.Nil(t, resp)
}
}
@@ -401,8 +377,8 @@ func TestAgentTrace(t *testing.T) {
}
resp, err := agent.Trace(PprofOptions{}, q)
require.NoError(t, err)
require.NotNil(t, resp)
must.NoError(t, err)
must.NotNil(t, resp)
}
func TestAgentProfile(t *testing.T) {
@@ -419,16 +395,16 @@ func TestAgentProfile(t *testing.T) {
{
resp, err := agent.Lookup("heap", PprofOptions{}, q)
require.NoError(t, err)
require.NotNil(t, resp)
must.NoError(t, err)
must.NotNil(t, resp)
}
// unknown profile
{
resp, err := agent.Lookup("invalid", PprofOptions{}, q)
require.Error(t, err)
require.Contains(t, err.Error(), "Unexpected response code: 404")
require.Nil(t, resp)
must.Error(t, err)
must.ErrorContains(t, err, "Unexpected response code: 404")
must.Nil(t, resp)
}
}
@@ -440,12 +416,12 @@ func TestAgent_SchedulerWorkerConfig(t *testing.T) {
a := c.Agent()
config, err := a.GetSchedulerWorkerConfig(nil)
require.NoError(t, err)
require.NotNil(t, config)
must.NoError(t, err)
must.NotNil(t, config)
newConfig := SchedulerWorkerPoolArgs{NumSchedulers: 0, EnabledSchedulers: []string{"_core", "system"}}
resp, err := a.SetSchedulerWorkerConfig(newConfig, nil)
require.NoError(t, err)
assert.NotEqual(t, config, resp)
must.NoError(t, err)
must.NotEq(t, config, resp)
}
func TestAgent_SchedulerWorkerConfig_BadRequest(t *testing.T) {
@@ -456,25 +432,26 @@ func TestAgent_SchedulerWorkerConfig_BadRequest(t *testing.T) {
a := c.Agent()
config, err := a.GetSchedulerWorkerConfig(nil)
require.NoError(t, err)
require.NotNil(t, config)
must.NoError(t, err)
must.NotNil(t, config)
newConfig := SchedulerWorkerPoolArgs{NumSchedulers: -1, EnabledSchedulers: []string{"_core", "system"}}
_, err = a.SetSchedulerWorkerConfig(newConfig, nil)
require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("%v (%s)", http.StatusBadRequest, "Invalid request"))
must.Error(t, err)
must.ErrorContains(t, err, "400 (Invalid request)")
}
func TestAgent_SchedulerWorkersInfo(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
a := c.Agent()
info, err := a.GetSchedulerWorkersInfo(nil)
require.NoError(t, err)
require.NotNil(t, info)
must.NoError(t, err)
must.NotNil(t, info)
defaultSchedulers := []string{"batch", "system", "sysbatch", "service", "_core"}
for _, worker := range info.Schedulers {
require.ElementsMatch(t, defaultSchedulers, worker.EnabledSchedulers)
must.SliceContainsAll(t, defaultSchedulers, worker.EnabledSchedulers)
}
}
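
The agent hunks also show how failure messages and error-content checks translate: extra context moves into must.Sprint or must.Sprintf settings, require.Contains(t, err.Error(), ...) becomes must.ErrorContains, and require.Fail becomes must.Unreachable. A hypothetical sketch (not code from this commit):

package api

import (
	"errors"
	"testing"

	"github.com/shoenig/test/must"
)

// TestMessageHelpers is illustrative only.
func TestMessageHelpers(t *testing.T) {
	name := "node-1"
	must.NotEq(t, "", name, must.Sprint("missing member name")) // message attached via a must.Sprint setting

	err := errors.New("Unexpected response code: 404 (not found)")
	must.ErrorContains(t, err, "404") // testify: require.Contains(t, err.Error(), "404")

	// require.Fail(t, "msg") becomes must.Unreachable(t, must.Sprint("msg")),
	// typically in a timeout branch that should never be reached.
}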

@@ -4,13 +4,12 @@ import (
"context"
"fmt"
"os"
"reflect"
"sort"
"testing"
"time"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
func TestAllocations_List(t *testing.T) {
@@ -25,22 +24,16 @@ func TestAllocations_List(t *testing.T) {
// Querying when no allocs exist returns nothing
allocs, qm, err := a.List(nil)
if err != nil {
t.Fatalf("err: %s", err)
}
if qm.LastIndex != 0 {
t.Fatalf("bad index: %d", qm.LastIndex)
}
if n := len(allocs); n != 0 {
t.Fatalf("expected 0 allocs, got: %d", n)
}
must.NoError(t, err)
must.Zero(t, qm.LastIndex)
must.Len(t, 0, allocs)
// Create a job and attempt to register it
job := testJob()
resp, wm, err := c.Jobs().Register(job, nil)
require.NoError(t, err)
require.NotNil(t, resp)
require.NotEmpty(t, resp.EvalID)
must.NoError(t, err)
must.NotNil(t, resp)
must.UUIDv4(t, resp.EvalID)
assertWriteMeta(t, wm)
// List the allocations again
@@ -48,34 +41,29 @@ func TestAllocations_List(t *testing.T) {
WaitIndex: wm.LastIndex,
}
allocs, qm, err = a.List(qo)
require.NoError(t, err)
require.NotZero(t, qm.LastIndex)
must.NoError(t, err)
must.NonZero(t, qm.LastIndex)
// Check that we got the allocation back
require.Len(t, allocs, 1)
require.Equal(t, resp.EvalID, allocs[0].EvalID)
must.Len(t, 1, allocs)
must.Eq(t, resp.EvalID, allocs[0].EvalID)
// Resources should be unset by default
require.Nil(t, allocs[0].AllocatedResources)
must.Nil(t, allocs[0].AllocatedResources)
}
func TestAllocations_PrefixList(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
a := c.Allocations()
// Querying when no allocs exist returns nothing
allocs, qm, err := a.PrefixList("")
if err != nil {
t.Fatalf("err: %s", err)
}
if qm.LastIndex != 0 {
t.Fatalf("bad index: %d", qm.LastIndex)
}
if n := len(allocs); n != 0 {
t.Fatalf("expected 0 allocs, got: %d", n)
}
must.NoError(t, err)
must.Zero(t, qm.LastIndex)
must.Len(t, 0, allocs)
// TODO: do something that causes an allocation to actually happen
// so we can query for them.
@@ -120,9 +108,9 @@ func TestAllocations_List_Resources(t *testing.T) {
// Create a job and register it
job := testJob()
resp, wm, err := c.Jobs().Register(job, nil)
require.NoError(t, err)
require.NotNil(t, resp)
require.NotEmpty(t, resp.EvalID)
must.NoError(t, err)
must.NotNil(t, resp)
must.UUIDv4(t, resp.EvalID)
assertWriteMeta(t, wm)
// List the allocations
@@ -131,17 +119,18 @@ func TestAllocations_List_Resources(t *testing.T) {
WaitIndex: wm.LastIndex,
}
allocs, qm, err := a.List(qo)
require.NoError(t, err)
require.NotZero(t, qm.LastIndex)
must.NoError(t, err)
must.NonZero(t, qm.LastIndex)
// Check that we got the allocation back with resources
require.Len(t, allocs, 1)
require.Equal(t, resp.EvalID, allocs[0].EvalID)
require.NotNil(t, allocs[0].AllocatedResources)
must.Len(t, 1, allocs)
must.Eq(t, resp.EvalID, allocs[0].EvalID)
must.NotNil(t, allocs[0].AllocatedResources)
}
func TestAllocations_CreateIndexSort(t *testing.T) {
testutil.Parallel(t)
allocs := []*AllocationListStub{
{CreateIndex: 2},
{CreateIndex: 1},
@@ -154,13 +143,12 @@ func TestAllocations_CreateIndexSort(t *testing.T) {
{CreateIndex: 2},
{CreateIndex: 1},
}
if !reflect.DeepEqual(allocs, expect) {
t.Fatalf("\n\n%#v\n\n%#v", allocs, expect)
}
must.Eq(t, allocs, expect)
}
func TestAllocations_RescheduleInfo(t *testing.T) {
testutil.Parallel(t)
// Create a job, task group and alloc
job := &Job{
Name: pointerOf("foo"),
@@ -262,12 +250,11 @@ func TestAllocations_RescheduleInfo(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
require := require.New(t)
alloc.RescheduleTracker = tc.rescheduleTracker
job.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy
attempted, total := alloc.RescheduleInfo(tc.time)
require.Equal(tc.expAttempted, attempted)
require.Equal(tc.expTotal, total)
must.Eq(t, tc.expAttempted, attempted)
must.Eq(t, tc.expTotal, total)
})
}
@@ -275,6 +262,8 @@ func TestAllocations_RescheduleInfo(t *testing.T) {
// TestAllocations_ExecErrors ensures errors are properly formatted
func TestAllocations_ExecErrors(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
a := c.Allocations()
@@ -316,8 +305,8 @@ func TestAllocations_ExecErrors(t *testing.T) {
// ensure the error is what we expect
exitCode, err := a.Exec(context.Background(), alloc, "bar", false, []string{"command"}, os.Stdin, os.Stdout, os.Stderr, sizeCh, nil)
require.Equal(t, exitCode, -2)
require.Equal(t, err.Error(), fmt.Sprintf("Unknown allocation \"%s\"", allocID))
must.Eq(t, -2, exitCode)
must.EqError(t, err, fmt.Sprintf("Unknown allocation \"%s\"", allocID))
}
func TestAllocation_ServerTerminalStatus(t *testing.T) {
@@ -347,7 +336,7 @@ func TestAllocation_ServerTerminalStatus(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
require.Equal(t, tc.expectedOutput, tc.inputAllocation.ServerTerminalStatus(), tc.name)
must.Eq(t, tc.expectedOutput, tc.inputAllocation.ServerTerminalStatus())
})
}
}
@@ -389,18 +378,20 @@ func TestAllocation_ClientTerminalStatus(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
require.Equal(t, tc.expectedOutput, tc.inputAllocation.ClientTerminalStatus(), tc.name)
must.Eq(t, tc.expectedOutput, tc.inputAllocation.ClientTerminalStatus())
})
}
}
func TestAllocations_ShouldMigrate(t *testing.T) {
testutil.Parallel(t)
require.True(t, DesiredTransition{Migrate: pointerOf(true)}.ShouldMigrate())
require.False(t, DesiredTransition{}.ShouldMigrate())
require.False(t, DesiredTransition{Migrate: pointerOf(false)}.ShouldMigrate())
must.True(t, DesiredTransition{Migrate: pointerOf(true)}.ShouldMigrate())
must.False(t, DesiredTransition{}.ShouldMigrate())
must.False(t, DesiredTransition{Migrate: pointerOf(false)}.ShouldMigrate())
}
func TestAllocations_Services(t *testing.T) {
t.Skip("needs to be implemented")
// TODO(jrasell) add tests once registration process is in place.
}
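
In the allocation tests, hand-rolled reflect.DeepEqual comparisons and their t.Fatalf branches collapse into single must.Eq calls, and exact error-string checks use must.EqError. A hypothetical sketch (not code from this commit), reusing the AllocationListStub type from this package:

package api

import (
	"errors"
	"testing"

	"github.com/shoenig/test/must"
)

// TestDeepEquality is illustrative only.
func TestDeepEquality(t *testing.T) {
	allocs := []*AllocationListStub{{CreateIndex: 2}, {CreateIndex: 1}}
	expect := []*AllocationListStub{{CreateIndex: 2}, {CreateIndex: 1}}

	// was: if !reflect.DeepEqual(allocs, expect) { t.Fatalf(...) }
	must.Eq(t, expect, allocs) // deep comparison with a readable diff on failure

	// was: require.Equal(t, err.Error(), "...")
	err := errors.New(`Unknown allocation "abc123"`)
	must.EqError(t, err, `Unknown allocation "abc123"`)
}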

@@ -17,7 +17,6 @@ import (
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/require"
)
type configCallback func(c *Config)
@@ -170,11 +169,11 @@ func TestSetQueryOptions(t *testing.T) {
try := func(key, exp string) {
result := r.params.Get(key)
require.Equal(t, exp, result)
must.Eq(t, exp, result)
}
// Check auth token is set
require.Equal(t, "foobar", r.token)
must.Eq(t, "foobar", r.token)
// Check query parameters are set
try("region", "foo")
@@ -499,30 +498,28 @@ func TestCloneHttpClient(t *testing.T) {
t.Run("closing with negative timeout", func(t *testing.T) {
clone, err := cloneWithTimeout(client, -1)
require.True(t, originalTransport == client.Transport, "original transport changed")
require.NoError(t, err)
require.Equal(t, client, clone)
require.True(t, client == clone)
must.True(t, originalTransport == client.Transport, must.Sprint("original transport changed"))
must.NoError(t, err)
must.True(t, client == clone)
})
t.Run("closing with positive timeout", func(t *testing.T) {
clone, err := cloneWithTimeout(client, 1*time.Second)
require.True(t, originalTransport == client.Transport, "original transport changed")
require.NoError(t, err)
require.NotEqual(t, client, clone)
require.True(t, client != clone)
require.True(t, client.Transport != clone.Transport)
must.True(t, originalTransport == client.Transport, must.Sprint("original transport changed"))
must.NoError(t, err)
must.True(t, client != clone)
must.True(t, client.Transport != clone.Transport)
// test that proxy function is the same in clone
clonedProxy := clone.Transport.(*http.Transport).Proxy
require.NotNil(t, clonedProxy)
must.NotNil(t, clonedProxy)
_, err = clonedProxy(nil)
require.Error(t, err)
require.Equal(t, "stub function", err.Error())
must.Error(t, err)
must.EqError(t, err, "stub function")
// if we reset transport, the structs are equal
clone.Transport = originalTransport
require.Equal(t, client, clone)
must.Eq(t, client, clone)
})
}
@@ -555,7 +552,7 @@ func TestClient_autoUnzip(t *testing.T) {
try := func(resp *http.Response, exp error) {
err := client.autoUnzip(resp)
require.Equal(t, exp, err)
must.Eq(t, exp, err)
}
// response object is nil
@@ -585,9 +582,9 @@ func TestClient_autoUnzip(t *testing.T) {
var b bytes.Buffer
w := gzip.NewWriter(&b)
_, err := w.Write([]byte("hello world"))
require.NoError(t, err)
must.NoError(t, err)
err = w.Close()
require.NoError(t, err)
must.NoError(t, err)
// content-encoding is gzip and body is gzip data
try(&http.Response{

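The client tests above draw a line between value equality and pointer identity: must.Eq compares values, while identity is asserted explicitly with must.True(t, a == b). A hypothetical sketch (not code from this commit), using this package's NewConstraint helper:

package api

import (
	"testing"

	"github.com/shoenig/test/must"
)

// TestIdentityVsEquality is illustrative only.
func TestIdentityVsEquality(t *testing.T) {
	a := NewConstraint("kernel.name", "=", "darwin")
	b := a
	c := NewConstraint("kernel.name", "=", "darwin")

	must.True(t, a == b) // identity: the same underlying object
	must.Eq(t, a, c)     // equality: distinct objects with equal contents
}
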
@@ -1,10 +1,10 @@
package api
import (
"reflect"
"testing"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/shoenig/test/must"
)
func TestCompose(t *testing.T) {
@@ -140,7 +140,5 @@ func TestCompose(t *testing.T) {
},
},
}
if !reflect.DeepEqual(job, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, job)
}
must.Eq(t, expect, job)
}

@@ -1,21 +1,20 @@
package api
import (
"reflect"
"testing"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/shoenig/test/must"
)
func TestCompose_Constraints(t *testing.T) {
testutil.Parallel(t)
c := NewConstraint("kernel.name", "=", "darwin")
expect := &Constraint{
LTarget: "kernel.name",
RTarget: "darwin",
Operand: "=",
}
if !reflect.DeepEqual(c, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, c)
}
must.Eq(t, expect, c)
}

@@ -5,31 +5,33 @@ import (
"time"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
func TestConsul_Canonicalize(t *testing.T) {
testutil.Parallel(t)
t.Run("missing ns", func(t *testing.T) {
c := new(Consul)
c.Canonicalize()
require.Empty(t, c.Namespace)
must.Eq(t, "", c.Namespace)
})
t.Run("complete", func(t *testing.T) {
c := &Consul{Namespace: "foo"}
c.Canonicalize()
require.Equal(t, "foo", c.Namespace)
must.Eq(t, "foo", c.Namespace)
})
}
func TestConsul_Copy(t *testing.T) {
testutil.Parallel(t)
t.Run("complete", func(t *testing.T) {
result := (&Consul{
Namespace: "foo",
}).Copy()
require.Equal(t, &Consul{
must.Eq(t, &Consul{
Namespace: "foo",
}, result)
})
@@ -37,28 +39,29 @@ func TestConsul_Copy(t *testing.T) {
func TestConsul_MergeNamespace(t *testing.T) {
testutil.Parallel(t)
t.Run("already set", func(t *testing.T) {
a := &Consul{Namespace: "foo"}
ns := pointerOf("bar")
a.MergeNamespace(ns)
require.Equal(t, "foo", a.Namespace)
require.Equal(t, "bar", *ns)
must.Eq(t, "foo", a.Namespace)
must.Eq(t, "bar", *ns)
})
t.Run("inherit", func(t *testing.T) {
a := &Consul{Namespace: ""}
ns := pointerOf("bar")
a.MergeNamespace(ns)
require.Equal(t, "bar", a.Namespace)
require.Equal(t, "bar", *ns)
must.Eq(t, "bar", a.Namespace)
must.Eq(t, "bar", *ns)
})
t.Run("parent is nil", func(t *testing.T) {
a := &Consul{Namespace: "foo"}
ns := (*string)(nil)
a.MergeNamespace(ns)
require.Equal(t, "foo", a.Namespace)
require.Nil(t, ns)
must.Eq(t, "foo", a.Namespace)
must.Nil(t, ns)
})
}
@@ -68,15 +71,15 @@ func TestConsulConnect_Canonicalize(t *testing.T) {
t.Run("nil connect", func(t *testing.T) {
cc := (*ConsulConnect)(nil)
cc.Canonicalize()
require.Nil(t, cc)
must.Nil(t, cc)
})
t.Run("empty connect", func(t *testing.T) {
cc := new(ConsulConnect)
cc.Canonicalize()
require.Empty(t, cc.Native)
require.Nil(t, cc.SidecarService)
require.Nil(t, cc.SidecarTask)
must.False(t, cc.Native)
must.Nil(t, cc.SidecarService)
must.Nil(t, cc.SidecarTask)
})
}
@@ -86,14 +89,14 @@ func TestConsulSidecarService_Canonicalize(t *testing.T) {
t.Run("nil sidecar_service", func(t *testing.T) {
css := (*ConsulSidecarService)(nil)
css.Canonicalize()
require.Nil(t, css)
must.Nil(t, css)
})
t.Run("empty sidecar_service", func(t *testing.T) {
css := new(ConsulSidecarService)
css.Canonicalize()
require.Empty(t, css.Tags)
require.Nil(t, css.Proxy)
must.SliceEmpty(t, css.Tags)
must.Nil(t, css.Proxy)
})
t.Run("non-empty sidecar_service", func(t *testing.T) {
@@ -106,7 +109,7 @@ func TestConsulSidecarService_Canonicalize(t *testing.T) {
},
}
css.Canonicalize()
require.Equal(t, &ConsulSidecarService{
must.Eq(t, &ConsulSidecarService{
Tags: nil,
Port: "port",
Proxy: &ConsulProxy{
@@ -122,17 +125,17 @@ func TestConsulProxy_Canonicalize(t *testing.T) {
t.Run("nil proxy", func(t *testing.T) {
cp := (*ConsulProxy)(nil)
cp.Canonicalize()
require.Nil(t, cp)
must.Nil(t, cp)
})
t.Run("empty proxy", func(t *testing.T) {
cp := new(ConsulProxy)
cp.Canonicalize()
require.Empty(t, cp.LocalServiceAddress)
require.Zero(t, cp.LocalServicePort)
require.Nil(t, cp.ExposeConfig)
require.Nil(t, cp.Upstreams)
require.Empty(t, cp.Config)
must.Eq(t, "", cp.LocalServiceAddress)
must.Zero(t, cp.LocalServicePort)
must.Nil(t, cp.ExposeConfig)
must.Nil(t, cp.Upstreams)
must.MapEmpty(t, cp.Config)
})
t.Run("non empty proxy", func(t *testing.T) {
@@ -144,11 +147,11 @@ func TestConsulProxy_Canonicalize(t *testing.T) {
Config: make(map[string]interface{}),
}
cp.Canonicalize()
require.Equal(t, "127.0.0.1", cp.LocalServiceAddress)
require.Equal(t, 80, cp.LocalServicePort)
require.Equal(t, &ConsulExposeConfig{}, cp.ExposeConfig)
require.Nil(t, cp.Upstreams)
require.Nil(t, cp.Config)
must.Eq(t, "127.0.0.1", cp.LocalServiceAddress)
must.Eq(t, 80, cp.LocalServicePort)
must.Eq(t, &ConsulExposeConfig{}, cp.ExposeConfig)
must.Nil(t, cp.Upstreams)
must.Nil(t, cp.Config)
})
}
@@ -158,7 +161,7 @@ func TestConsulUpstream_Copy(t *testing.T) {
t.Run("nil upstream", func(t *testing.T) {
cu := (*ConsulUpstream)(nil)
result := cu.Copy()
require.Nil(t, result)
must.Nil(t, result)
})
t.Run("complete upstream", func(t *testing.T) {
@@ -171,7 +174,7 @@ func TestConsulUpstream_Copy(t *testing.T) {
MeshGateway: &ConsulMeshGateway{Mode: "remote"},
}
result := cu.Copy()
require.Equal(t, cu, result)
must.Eq(t, cu, result)
})
}
@@ -181,7 +184,7 @@ func TestConsulUpstream_Canonicalize(t *testing.T) {
t.Run("nil upstream", func(t *testing.T) {
cu := (*ConsulUpstream)(nil)
cu.Canonicalize()
require.Nil(t, cu)
must.Nil(t, cu)
})
t.Run("complete", func(t *testing.T) {
@@ -194,7 +197,7 @@ func TestConsulUpstream_Canonicalize(t *testing.T) {
MeshGateway: &ConsulMeshGateway{Mode: ""},
}
cu.Canonicalize()
require.Equal(t, &ConsulUpstream{
must.Eq(t, &ConsulUpstream{
DestinationName: "dest1",
DestinationNamespace: "ns2",
Datacenter: "dc2",
@@ -211,19 +214,19 @@ func TestSidecarTask_Canonicalize(t *testing.T) {
t.Run("nil sidecar_task", func(t *testing.T) {
st := (*SidecarTask)(nil)
st.Canonicalize()
require.Nil(t, st)
must.Nil(t, st)
})
t.Run("empty sidecar_task", func(t *testing.T) {
st := new(SidecarTask)
st.Canonicalize()
require.Nil(t, st.Config)
require.Nil(t, st.Env)
require.Equal(t, DefaultResources(), st.Resources)
require.Equal(t, DefaultLogConfig(), st.LogConfig)
require.Nil(t, st.Meta)
require.Equal(t, 5*time.Second, *st.KillTimeout)
require.Equal(t, 0*time.Second, *st.ShutdownDelay)
must.Nil(t, st.Config)
must.Nil(t, st.Env)
must.Eq(t, DefaultResources(), st.Resources)
must.Eq(t, DefaultLogConfig(), st.LogConfig)
must.Nil(t, st.Meta)
must.Eq(t, 5*time.Second, *st.KillTimeout)
must.Eq(t, 0*time.Second, *st.ShutdownDelay)
})
t.Run("non empty sidecar_task resources", func(t *testing.T) {
@@ -233,7 +236,7 @@ func TestSidecarTask_Canonicalize(t *testing.T) {
Resources: &Resources{MemoryMB: pointerOf(333)},
}
st.Canonicalize()
require.Equal(t, exp, st.Resources)
must.Eq(t, exp, st.Resources)
})
}
@@ -243,7 +246,7 @@ func TestConsulGateway_Canonicalize(t *testing.T) {
t.Run("nil", func(t *testing.T) {
cg := (*ConsulGateway)(nil)
cg.Canonicalize()
require.Nil(t, cg)
must.Nil(t, cg)
})
t.Run("set defaults", func(t *testing.T) {
@@ -263,13 +266,13 @@ func TestConsulGateway_Canonicalize(t *testing.T) {
},
}
cg.Canonicalize()
require.Equal(t, pointerOf(5*time.Second), cg.Proxy.ConnectTimeout)
require.True(t, cg.Proxy.EnvoyGatewayBindTaggedAddresses)
require.Nil(t, cg.Proxy.EnvoyGatewayBindAddresses)
require.True(t, cg.Proxy.EnvoyGatewayNoDefaultBind)
require.Empty(t, cg.Proxy.EnvoyDNSDiscoveryType)
require.Nil(t, cg.Proxy.Config)
require.Nil(t, cg.Ingress.Listeners)
must.Eq(t, pointerOf(5*time.Second), cg.Proxy.ConnectTimeout)
must.True(t, cg.Proxy.EnvoyGatewayBindTaggedAddresses)
must.Nil(t, cg.Proxy.EnvoyGatewayBindAddresses)
must.True(t, cg.Proxy.EnvoyGatewayNoDefaultBind)
must.Eq(t, "", cg.Proxy.EnvoyDNSDiscoveryType)
must.Nil(t, cg.Proxy.Config)
must.Nil(t, cg.Ingress.Listeners)
})
}
@@ -278,7 +281,7 @@ func TestConsulGateway_Copy(t *testing.T) {
t.Run("nil", func(t *testing.T) {
result := (*ConsulGateway)(nil).Copy()
require.Nil(t, result)
must.Nil(t, result)
})
gateway := &ConsulGateway{
@@ -320,7 +323,7 @@ func TestConsulGateway_Copy(t *testing.T) {
t.Run("complete", func(t *testing.T) {
result := gateway.Copy()
require.Equal(t, gateway, result)
must.Eq(t, gateway, result)
})
}
@@ -330,7 +333,7 @@ func TestConsulIngressConfigEntry_Canonicalize(t *testing.T) {
t.Run("nil", func(t *testing.T) {
c := (*ConsulIngressConfigEntry)(nil)
c.Canonicalize()
require.Nil(t, c)
must.Nil(t, c)
})
t.Run("empty fields", func(t *testing.T) {
@@ -339,8 +342,8 @@ func TestConsulIngressConfigEntry_Canonicalize(t *testing.T) {
Listeners: []*ConsulIngressListener{},
}
c.Canonicalize()
require.Nil(t, c.TLS)
require.Nil(t, c.Listeners)
must.Nil(t, c.TLS)
must.Nil(t, c.Listeners)
})
t.Run("complete", func(t *testing.T) {
@@ -356,7 +359,7 @@ func TestConsulIngressConfigEntry_Canonicalize(t *testing.T) {
}},
}
c.Canonicalize()
require.Equal(t, &ConsulIngressConfigEntry{
must.Eq(t, &ConsulIngressConfigEntry{
TLS: &ConsulGatewayTLSConfig{Enabled: true},
Listeners: []*ConsulIngressListener{{
Port: 9090,
@@ -375,7 +378,7 @@ func TestConsulIngressConfigEntry_Copy(t *testing.T) {
t.Run("nil", func(t *testing.T) {
result := (*ConsulIngressConfigEntry)(nil).Copy()
require.Nil(t, result)
must.Nil(t, result)
})
entry := &ConsulIngressConfigEntry{
@@ -397,7 +400,7 @@ func TestConsulIngressConfigEntry_Copy(t *testing.T) {
t.Run("complete", func(t *testing.T) {
result := entry.Copy()
require.Equal(t, entry, result)
must.Eq(t, entry, result)
})
}
@@ -407,7 +410,7 @@ func TestConsulTerminatingConfigEntry_Canonicalize(t *testing.T) {
t.Run("nil", func(t *testing.T) {
c := (*ConsulTerminatingConfigEntry)(nil)
c.Canonicalize()
require.Nil(t, c)
must.Nil(t, c)
})
t.Run("empty services", func(t *testing.T) {
@@ -415,7 +418,7 @@ func TestConsulTerminatingConfigEntry_Canonicalize(t *testing.T) {
Services: []*ConsulLinkedService{},
}
c.Canonicalize()
require.Nil(t, c.Services)
must.Nil(t, c.Services)
})
}
@@ -424,7 +427,7 @@ func TestConsulTerminatingConfigEntry_Copy(t *testing.T) {
t.Run("nil", func(t *testing.T) {
result := (*ConsulIngressConfigEntry)(nil).Copy()
require.Nil(t, result)
must.Nil(t, result)
})
entry := &ConsulTerminatingConfigEntry{
@@ -441,7 +444,7 @@ func TestConsulTerminatingConfigEntry_Copy(t *testing.T) {
t.Run("complete", func(t *testing.T) {
result := entry.Copy()
require.Equal(t, entry, result)
must.Eq(t, entry, result)
})
}
@@ -451,13 +454,13 @@ func TestConsulMeshConfigEntry_Canonicalize(t *testing.T) {
t.Run("nil", func(t *testing.T) {
ce := (*ConsulMeshConfigEntry)(nil)
ce.Canonicalize()
require.Nil(t, ce)
must.Nil(t, ce)
})
t.Run("instantiated", func(t *testing.T) {
ce := new(ConsulMeshConfigEntry)
ce.Canonicalize()
require.NotNil(t, ce)
must.NotNil(t, ce)
})
}
@@ -467,13 +470,13 @@ func TestConsulMeshConfigEntry_Copy(t *testing.T) {
t.Run("nil", func(t *testing.T) {
ce := (*ConsulMeshConfigEntry)(nil)
ce2 := ce.Copy()
require.Nil(t, ce2)
must.Nil(t, ce2)
})
t.Run("instantiated", func(t *testing.T) {
ce := new(ConsulMeshConfigEntry)
ce2 := ce.Copy()
require.NotNil(t, ce2)
must.NotNil(t, ce2)
})
}
@@ -483,19 +486,19 @@ func TestConsulMeshGateway_Canonicalize(t *testing.T) {
t.Run("nil", func(t *testing.T) {
c := (*ConsulMeshGateway)(nil)
c.Canonicalize()
require.Nil(t, c)
must.Nil(t, c)
})
t.Run("unset mode", func(t *testing.T) {
c := &ConsulMeshGateway{Mode: ""}
c.Canonicalize()
require.Equal(t, "", c.Mode)
must.Eq(t, "", c.Mode)
})
t.Run("set mode", func(t *testing.T) {
c := &ConsulMeshGateway{Mode: "remote"}
c.Canonicalize()
require.Equal(t, "remote", c.Mode)
must.Eq(t, "remote", c.Mode)
})
}
@@ -505,7 +508,7 @@ func TestConsulMeshGateway_Copy(t *testing.T) {
t.Run("nil", func(t *testing.T) {
c := (*ConsulMeshGateway)(nil)
result := c.Copy()
require.Nil(t, result)
must.Nil(t, result)
})
t.Run("instantiated", func(t *testing.T) {
@@ -513,7 +516,7 @@ func TestConsulMeshGateway_Copy(t *testing.T) {
Mode: "local",
}
result := c.Copy()
require.Equal(t, c, result)
must.Eq(t, c, result)
})
}
@@ -523,7 +526,7 @@ func TestConsulGatewayTLSConfig_Copy(t *testing.T) {
t.Run("nil", func(t *testing.T) {
c := (*ConsulGatewayTLSConfig)(nil)
result := c.Copy()
require.Nil(t, result)
must.Nil(t, result)
})
t.Run("enabled", func(t *testing.T) {
@@ -531,7 +534,7 @@ func TestConsulGatewayTLSConfig_Copy(t *testing.T) {
Enabled: true,
}
result := c.Copy()
require.Equal(t, c, result)
must.Eq(t, c, result)
})
t.Run("customized", func(t *testing.T) {
@@ -542,6 +545,6 @@ func TestConsulGatewayTLSConfig_Copy(t *testing.T) {
CipherSuites: []string{"foo", "bar"},
}
result := c.Copy()
require.Equal(t, c, result)
must.Eq(t, c, result)
})
}
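
The Consul hunks replace testify's catch-all require.Empty with type-specific helpers: must.Eq(t, "", s) for strings, must.Zero for numbers, must.SliceEmpty for slices, and must.MapEmpty for maps, while nil-pointer checks stay as must.Nil. A hypothetical sketch (not code from this commit), mirroring the empty-proxy case above:

package api

import (
	"testing"

	"github.com/shoenig/test/must"
)

// TestEmptyHelpers is illustrative only.
func TestEmptyHelpers(t *testing.T) {
	cp := new(ConsulProxy)
	cp.Canonicalize()

	must.Eq(t, "", cp.LocalServiceAddress) // was: require.Empty on a string
	must.Zero(t, cp.LocalServicePort)      // was: require.Zero on a number
	must.Nil(t, cp.ExposeConfig)           // nil-pointer check is unchanged
	must.MapEmpty(t, cp.Config)            // was: require.Empty on a map
}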

@@ -4,7 +4,7 @@ import (
"testing"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
// TestCSIVolumes_CRUD fails because of a combination of removing the job to plugin creation
@@ -14,89 +14,94 @@ import (
// 2. Build and deploy a dummy CSI plugin via a job, and have it really fingerprint
func TestCSIVolumes_CRUD(t *testing.T) {
testutil.Parallel(t)
c, s, root := makeACLClient(t, nil, nil)
defer s.Stop()
v := c.CSIVolumes()
// Successful empty result
vols, qm, err := v.List(nil)
require.NoError(t, err)
require.NotEqual(t, 0, qm.LastIndex)
require.Equal(t, 0, len(vols))
must.NoError(t, err)
// must.Positive(t, qm.LastIndex) TODO(tgross), this was always broken?
_ = qm
must.SliceEmpty(t, vols)
_ = root
// FIXME we're bailing out here until one of the fixes is available
return
/*
// Authorized QueryOpts. Use the root token to just bypass ACL details
opts := &QueryOptions{
Region: "global",
Namespace: "default",
AuthToken: root.SecretID,
}
// Authorized QueryOpts. Use the root token to just bypass ACL details
opts := &QueryOptions{
Region: "global",
Namespace: "default",
AuthToken: root.SecretID,
}
wpts := &WriteOptions{
Region: "global",
Namespace: "default",
AuthToken: root.SecretID,
}
wpts := &WriteOptions{
Region: "global",
Namespace: "default",
AuthToken: root.SecretID,
}
// Create node plugins
nodes, _, err := c.Nodes().List(nil)
require.NoError(t, err)
require.Equal(t, 1, len(nodes))
// Create node plugins
nodes, _, err := c.Nodes().List(nil)
require.NoError(t, err)
require.Equal(t, 1, len(nodes))
nodeStub := nodes[0]
node, _, err := c.Nodes().Info(nodeStub.ID, nil)
require.NoError(t, err)
node.CSINodePlugins = map[string]*CSIInfo{
"foo": {
PluginID: "foo",
Healthy: true,
RequiresControllerPlugin: false,
RequiresTopologies: false,
NodeInfo: &CSINodeInfo{
ID: nodeStub.ID,
MaxVolumes: 200,
nodeStub := nodes[0]
node, _, err := c.Nodes().Info(nodeStub.ID, nil)
require.NoError(t, err)
node.CSINodePlugins = map[string]*CSIInfo{
"foo": {
PluginID: "foo",
Healthy: true,
RequiresControllerPlugin: false,
RequiresTopologies: false,
NodeInfo: &CSINodeInfo{
ID: nodeStub.ID,
MaxVolumes: 200,
},
},
},
}
}
// Register a volume
// This id is here as a string to avoid importing helper, which causes the lint
// rule that checks that the api package is isolated to fail
id := "DEADBEEF-31B5-8F78-7986-DD404FDA0CD1"
_, err = v.Register(&CSIVolume{
ID: id,
Namespace: "default",
PluginID: "foo",
AccessMode: CSIVolumeAccessModeMultiNodeSingleWriter,
AttachmentMode: CSIVolumeAttachmentModeFilesystem,
Topologies: []*CSITopology{{Segments: map[string]string{"foo": "bar"}}},
}, wpts)
require.NoError(t, err)
// Register a volume
// This id is here as a string to avoid importing helper, which causes the lint
// rule that checks that the api package is isolated to fail
id := "DEADBEEF-31B5-8F78-7986-DD404FDA0CD1"
_, err = v.Register(&CSIVolume{
ID: id,
Namespace: "default",
PluginID: "foo",
AccessMode: CSIVolumeAccessModeMultiNodeSingleWriter,
AttachmentMode: CSIVolumeAttachmentModeFilesystem,
Topologies: []*CSITopology{{Segments: map[string]string{"foo": "bar"}}},
}, wpts)
require.NoError(t, err)
// Successful result with volumes
vols, qm, err = v.List(opts)
require.NoError(t, err)
require.NotEqual(t, 0, qm.LastIndex)
require.Equal(t, 1, len(vols))
// Successful result with volumes
vols, qm, err = v.List(opts)
require.NoError(t, err)
require.NotEqual(t, 0, qm.LastIndex)
require.Equal(t, 1, len(vols))
// Successful info query
vol, qm, err := v.Info(id, opts)
require.NoError(t, err)
require.Equal(t, "bar", vol.Topologies[0].Segments["foo"])
// Successful info query
vol, qm, err := v.Info(id, opts)
require.NoError(t, err)
require.Equal(t, "bar", vol.Topologies[0].Segments["foo"])
// Deregister the volume
err = v.Deregister(id, true, wpts)
require.NoError(t, err)
// Deregister the volume
err = v.Deregister(id, true, wpts)
require.NoError(t, err)
// Successful empty result
vols, qm, err = v.List(nil)
require.NoError(t, err)
require.NotEqual(t, 0, qm.LastIndex)
require.Equal(t, 0, len(vols))
// Successful empty result
vols, qm, err = v.List(nil)
require.NoError(t, err)
require.NotEqual(t, 0, qm.LastIndex)
require.Equal(t, 0, len(vols))
// Failed info query
vol, qm, err = v.Info(id, opts)
require.Error(t, err, "missing")
// Failed info query
vol, qm, err = v.Info(id, opts)
require.Error(t, err, "missing")
*/
}

@@ -8,41 +8,41 @@ import (
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/shoenig/test/must"
"github.com/shoenig/test/wait"
"github.com/stretchr/testify/require"
)
func TestEvaluations_List(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
e := c.Evaluations()
// Listing when nothing exists returns empty
result, qm, err := e.List(nil)
require.NoError(t, err)
require.Equal(t, uint64(0), qm.LastIndex, "bad index")
require.Equal(t, 0, len(result), "expected 0 evaluations")
must.NoError(t, err)
must.Eq(t, 0, qm.LastIndex)
must.SliceEmpty(t, result)
// Register a job. This will create an evaluation.
jobs := c.Jobs()
job := testJob()
resp, wm, err := jobs.Register(job, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Check the evaluations again
result, qm, err = e.List(nil)
require.NoError(t, err)
must.NoError(t, err)
assertQueryMeta(t, qm)
// if the eval fails fast there can be more than 1
// but they are in order of most recent first, so look at the last one
require.Greater(t, len(result), 0, "expected eval (%s), got none", resp.EvalID)
must.Positive(t, len(result))
idx := len(result) - 1
require.Equal(t, resp.EvalID, result[idx].ID, "expected eval (%s), got: %#v", resp.EvalID, result[idx])
must.Eq(t, resp.EvalID, result[idx].ID)
// wait until the 2nd eval shows up before we try paging
results := []*Evaluation{}
var results []*Evaluation
f := func() error {
results, _, err = e.List(nil)
@@ -60,95 +60,97 @@ func TestEvaluations_List(t *testing.T) {
result, qm, err = e.List(&QueryOptions{
PerPage: int32(1),
})
require.NoError(t, err)
require.Equal(t, 1, len(result), "expected no evals after last one but got %d: %#v", len(result), result)
must.NoError(t, err)
must.Len(t, 1, result)
// query second page
result, qm, err = e.List(&QueryOptions{
PerPage: int32(1),
NextToken: qm.NextToken,
})
require.NoError(t, err)
require.Equal(t, 1, len(result), "expected no evals after last one but got %d: %#v", len(result), result)
must.NoError(t, err)
must.Len(t, 1, result)
// Query evaluations using a filter.
results, _, err = e.List(&QueryOptions{
Filter: `TriggeredBy == "job-register"`,
})
require.Equal(t, 1, len(result), "expected 1 eval, got %d", len(result))
must.Len(t, 1, result)
}
func TestEvaluations_PrefixList(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
e := c.Evaluations()
// Listing when nothing exists returns empty
result, qm, err := e.PrefixList("abcdef")
require.NoError(t, err)
require.Equal(t, uint64(0), qm.LastIndex, "bad index")
require.Equal(t, 0, len(result), "expected 0 evaluations")
must.NoError(t, err)
must.Eq(t, 0, qm.LastIndex)
must.SliceEmpty(t, result)
// Register a job. This will create an evaluation.
jobs := c.Jobs()
job := testJob()
resp, wm, err := jobs.Register(job, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Check the evaluations again
result, qm, err = e.PrefixList(resp.EvalID[:4])
require.NoError(t, err)
must.NoError(t, err)
assertQueryMeta(t, qm)
// Check if we have the right list
require.Equal(t, 1, len(result))
require.Equal(t, resp.EvalID, result[0].ID)
must.Len(t, 1, result)
must.Eq(t, resp.EvalID, result[0].ID)
}
func TestEvaluations_Info(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
e := c.Evaluations()
// Querying a nonexistent evaluation returns error
_, _, err := e.Info("8E231CF4-CA48-43FF-B694-5801E69E22FA", nil)
require.Error(t, err)
must.Error(t, err)
// Register a job. Creates a new evaluation.
jobs := c.Jobs()
job := testJob()
resp, wm, err := jobs.Register(job, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Try looking up by the new eval ID
result, qm, err := e.Info(resp.EvalID, nil)
require.NoError(t, err)
must.NoError(t, err)
assertQueryMeta(t, qm)
// Check that we got the right result
require.NotNil(t, result)
require.Equal(t, resp.EvalID, result.ID)
must.NotNil(t, result)
must.Eq(t, resp.EvalID, result.ID)
// Register the job again to get a related eval
resp, wm, err = jobs.Register(job, nil)
evals, _, err := e.List(nil)
require.NoError(t, err)
must.NoError(t, err)
// Find an eval that should have related evals
for _, eval := range evals {
if eval.NextEval != "" || eval.PreviousEval != "" || eval.BlockedEval != "" {
result, qm, err := e.Info(eval.ID, &QueryOptions{
result, qm, err = e.Info(eval.ID, &QueryOptions{
Params: map[string]string{
"related": "true",
},
})
require.NoError(t, err)
must.NoError(t, err)
assertQueryMeta(t, qm)
require.NotNil(t, result.RelatedEvals)
must.NotNil(t, result.RelatedEvals)
}
}
}
@@ -162,22 +164,22 @@ func TestEvaluations_Delete(t *testing.T) {
// Attempting to delete an evaluation when the eval broker is not paused
// should return an error.
wm, err := testClient.Evaluations().Delete([]string{"8E231CF4-CA48-43FF-B694-5801E69E22FA"}, nil)
require.Nil(t, wm)
require.ErrorContains(t, err, "eval broker is enabled")
must.Nil(t, wm)
must.ErrorContains(t, err, "eval broker is enabled")
// Pause the eval broker, and try to delete an evaluation that does not
// exist.
schedulerConfig, _, err := testClient.Operator().SchedulerGetConfiguration(nil)
require.NoError(t, err)
require.NotNil(t, schedulerConfig)
must.NoError(t, err)
must.NotNil(t, schedulerConfig)
schedulerConfig.SchedulerConfig.PauseEvalBroker = true
schedulerConfigUpdated, _, err := testClient.Operator().SchedulerCASConfiguration(schedulerConfig.SchedulerConfig, nil)
require.NoError(t, err)
require.True(t, schedulerConfigUpdated.Updated)
must.NoError(t, err)
must.True(t, schedulerConfigUpdated.Updated)
wm, err = testClient.Evaluations().Delete([]string{"8E231CF4-CA48-43FF-B694-5801E69E22FA"}, nil)
require.ErrorContains(t, err, "eval not found")
must.ErrorContains(t, err, "eval not found")
}
func TestEvaluations_Allocations(t *testing.T) {
@@ -188,9 +190,9 @@ func TestEvaluations_Allocations(t *testing.T) {
// Returns empty if no allocations
allocs, qm, err := e.Allocations("8E231CF4-CA48-43FF-B694-5801E69E22FA", nil)
require.NoError(t, err)
require.Equal(t, uint64(0), qm.LastIndex, "bad index")
require.Equal(t, 0, len(allocs), "expected 0 evaluations")
must.NoError(t, err)
must.Eq(t, 0, qm.LastIndex)
must.SliceEmpty(t, allocs)
}
func TestEvaluations_Sort(t *testing.T) {
@@ -207,5 +209,5 @@ func TestEvaluations_Sort(t *testing.T) {
{CreateIndex: 2},
{CreateIndex: 1},
}
require.Equal(t, expect, evals)
must.Eq(t, expect, evals)
}
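
The remaining hunks in this commit all follow the same substitution pattern, so a compact sketch of the new assertion style may help while reading them. The file and test name below are hypothetical and not part of the commit; the sketch only uses helpers that already appear in these diffs (must.NoError, must.Eq, must.Len, must.SliceEmpty, must.ErrorContains), and the main thing to notice is the argument order: expected values and lengths come first.

package api

import (
	"errors"
	"testing"

	"github.com/shoenig/test/must"
)

// Hypothetical reference test, not part of the commit: it demonstrates the
// argument order of the shoenig/test helpers used throughout these hunks.
func TestMustHelpers_Reference(t *testing.T) {
	var err error
	must.NoError(t, err) // replaces require.NoError(t, err)

	items := []string{"a", "b"}
	must.Eq(t, "a", items[0]) // expected value first, like require.Equal
	must.Len(t, 2, items)     // expected length first (require.Len took it last)
	must.SliceEmpty(t, items[:0])

	err = errors.New("eval not found")
	must.ErrorContains(t, err, "not found") // replaces require.Error + require.Contains
}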


@@ -8,7 +8,7 @@ import (
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/mitchellh/mapstructure"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
func TestTopic_String(t *testing.T) {
@@ -51,7 +51,7 @@ func TestTopic_String(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.expectedOutput, func(t *testing.T) {
actualOutput := tc.inputTopic.String()
require.Equal(t, tc.expectedOutput, actualOutput)
must.Eq(t, tc.expectedOutput, actualOutput)
})
}
}
@@ -66,8 +66,8 @@ func TestEvent_Stream(t *testing.T) {
jobs := c.Jobs()
job := testJob()
resp2, _, err := jobs.Register(job, nil)
require.Nil(t, err)
require.NotNil(t, resp2)
must.NoError(t, err)
must.NotNil(t, resp2)
// build event stream request
events := c.EventStream()
@@ -80,17 +80,17 @@ func TestEvent_Stream(t *testing.T) {
defer cancel()
streamCh, err := events.Stream(ctx, topics, 0, q)
require.NoError(t, err)
must.NoError(t, err)
select {
case event := <-streamCh:
if event.Err != nil {
require.Fail(t, err.Error())
must.Unreachable(t, must.Sprintf("unexpected %v", event.Err))
}
require.Equal(t, len(event.Events), 1)
require.Equal(t, "Evaluation", string(event.Events[0].Topic))
must.Len(t, 1, event.Events)
must.Eq(t, "Evaluation", string(event.Events[0].Topic))
case <-time.After(5 * time.Second):
require.Fail(t, "failed waiting for event stream event")
must.Unreachable(t, must.Sprint("failed waiting for event stream event"))
}
}
@@ -104,8 +104,8 @@ func TestEvent_Stream_Err_InvalidQueryParam(t *testing.T) {
jobs := c.Jobs()
job := testJob()
resp2, _, err := jobs.Register(job, nil)
require.Nil(t, err)
require.NotNil(t, resp2)
must.NoError(t, err)
must.NotNil(t, resp2)
// build event stream request
events := c.EventStream()
@@ -118,9 +118,7 @@ func TestEvent_Stream_Err_InvalidQueryParam(t *testing.T) {
defer cancel()
_, err = events.Stream(ctx, topics, 0, q)
require.Error(t, err)
require.Contains(t, err.Error(), "400")
require.Contains(t, err.Error(), "Invalid key value pair")
must.ErrorContains(t, err, "Invalid key value pair")
}
func TestEvent_Stream_CloseCtx(t *testing.T) {
@@ -133,8 +131,8 @@ func TestEvent_Stream_CloseCtx(t *testing.T) {
jobs := c.Jobs()
job := testJob()
resp2, _, err := jobs.Register(job, nil)
require.Nil(t, err)
require.NotNil(t, resp2)
must.NoError(t, err)
must.NotNil(t, resp2)
// build event stream request
events := c.EventStream()
@@ -146,17 +144,17 @@ func TestEvent_Stream_CloseCtx(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
streamCh, err := events.Stream(ctx, topics, 0, q)
require.NoError(t, err)
must.NoError(t, err)
// cancel the request
cancel()
select {
case event, ok := <-streamCh:
require.False(t, ok)
require.Nil(t, event)
must.False(t, ok)
must.Nil(t, event)
case <-time.After(5 * time.Second):
require.Fail(t, "failed waiting for event stream event")
must.Unreachable(t, must.Sprint("failed waiting for event stream event"))
}
}
@@ -172,8 +170,8 @@ func TestEventStream_PayloadValue(t *testing.T) {
jobs := c.Jobs()
job := testJob()
resp2, _, err := jobs.Register(job, nil)
require.Nil(t, err)
require.NotNil(t, resp2)
must.NoError(t, err)
must.NotNil(t, resp2)
// build event stream request
events := c.EventStream()
@@ -186,18 +184,18 @@ func TestEventStream_PayloadValue(t *testing.T) {
defer cancel()
streamCh, err := events.Stream(ctx, topics, 0, q)
require.NoError(t, err)
must.NoError(t, err)
select {
case event := <-streamCh:
if event.Err != nil {
require.NoError(t, err)
must.NoError(t, err)
}
for _, e := range event.Events {
// verify that we get a node
n, err := e.Node()
require.NoError(t, err)
require.NotEmpty(t, n.ID)
must.NoError(t, err)
must.UUIDv4(t, n.ID)
// perform a raw decoding and look for:
// - "ID" to make sure that raw decoding is working correctly
@@ -207,15 +205,15 @@ func TestEventStream_PayloadValue(t *testing.T) {
Result: &raw,
}
dec, err := mapstructure.NewDecoder(cfg)
require.NoError(t, err)
require.NoError(t, dec.Decode(e.Payload))
require.Contains(t, raw, "Node")
must.NoError(t, err)
must.NoError(t, dec.Decode(e.Payload))
must.MapContainsKeys(t, raw, []string{"Node"})
rawNode := raw["Node"]
require.Equal(t, n.ID, rawNode["ID"])
require.Empty(t, rawNode["SecretID"])
must.Eq(t, n.ID, rawNode["ID"].(string))
must.Eq(t, "", rawNode["SecretID"])
}
case <-time.After(5 * time.Second):
require.Fail(t, "failed waiting for event stream event")
must.Unreachable(t, must.Sprint("failed waiting for event stream event"))
}
}
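
The raw-decode block above is the least mechanical part of this hunk, so here is the same idea pulled out into a standalone sketch. The helper name is invented and the snippet is not in the commit; it assumes the test file's existing imports (testing, mapstructure, must) and the Event type already used above. Decoding the payload into a plain map lets the test see fields the typed Node() accessor hides, which is how it checks that SecretID is empty in the raw payload.

// Hypothetical standalone form of the raw-decode check above (not in the commit).
func assertNodeSecretIDRedacted(t *testing.T, e Event) {
	var raw map[string]map[string]interface{}
	cfg := &mapstructure.DecoderConfig{Result: &raw}

	dec, err := mapstructure.NewDecoder(cfg)
	must.NoError(t, err)
	must.NoError(t, dec.Decode(e.Payload))

	// "Node" proves raw decoding worked; SecretID must be empty in the raw payload.
	must.MapContainsKeys(t, raw, []string{"Node"})
	rawNode := raw["Node"]
	must.Eq(t, "", rawNode["SecretID"])
}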
@@ -234,13 +232,12 @@ func TestEventStream_PayloadValueHelpers(t *testing.T) {
input: []byte(`{"Topic": "Deployment", "Payload": {"Deployment":{"ID":"some-id","JobID":"some-job-id", "TaskGroups": {"tg1": {"RequireProgressBy": "2020-11-05T11:52:54.370774000-05:00"}}}}}`),
expectFn: func(t *testing.T, event Event) {
eventTime, err := time.Parse(time.RFC3339, "2020-11-05T11:52:54.370774000-05:00")
require.NoError(t, err)
require.Equal(t, TopicDeployment, event.Topic)
must.NoError(t, err)
must.Eq(t, TopicDeployment, event.Topic)
d, err := event.Deployment()
require.NoError(t, err)
require.NoError(t, err)
require.Equal(t, &Deployment{
must.NoError(t, err)
must.Eq(t, &Deployment{
ID: "some-id",
JobID: "some-job-id",
TaskGroups: map[string]*DeploymentState{
@@ -255,11 +252,10 @@ func TestEventStream_PayloadValueHelpers(t *testing.T) {
desc: "evaluation",
input: []byte(`{"Topic": "Evaluation", "Payload": {"Evaluation":{"ID":"some-id","Namespace":"some-namespace-id"}}}`),
expectFn: func(t *testing.T, event Event) {
require.Equal(t, TopicEvaluation, event.Topic)
must.Eq(t, TopicEvaluation, event.Topic)
eval, err := event.Evaluation()
require.NoError(t, err)
require.Equal(t, &Evaluation{
must.NoError(t, err)
must.Eq(t, &Evaluation{
ID: "some-id",
Namespace: "some-namespace-id",
}, eval)
@@ -269,10 +265,10 @@ func TestEventStream_PayloadValueHelpers(t *testing.T) {
desc: "allocation",
input: []byte(`{"Topic": "Allocation", "Payload": {"Allocation":{"ID":"some-id","Namespace":"some-namespace-id"}}}`),
expectFn: func(t *testing.T, event Event) {
require.Equal(t, TopicAllocation, event.Topic)
must.Eq(t, TopicAllocation, event.Topic)
a, err := event.Allocation()
require.NoError(t, err)
require.Equal(t, &Allocation{
must.NoError(t, err)
must.Eq(t, &Allocation{
ID: "some-id",
Namespace: "some-namespace-id",
}, a)
@@ -281,10 +277,10 @@ func TestEventStream_PayloadValueHelpers(t *testing.T) {
{
input: []byte(`{"Topic": "Job", "Payload": {"Job":{"ID":"some-id","Namespace":"some-namespace-id"}}}`),
expectFn: func(t *testing.T, event Event) {
require.Equal(t, TopicJob, event.Topic)
must.Eq(t, TopicJob, event.Topic)
j, err := event.Job()
require.NoError(t, err)
require.Equal(t, &Job{
must.NoError(t, err)
must.Eq(t, &Job{
ID: pointerOf("some-id"),
Namespace: pointerOf("some-namespace-id"),
}, j)
@@ -294,10 +290,10 @@ func TestEventStream_PayloadValueHelpers(t *testing.T) {
desc: "node",
input: []byte(`{"Topic": "Node", "Payload": {"Node":{"ID":"some-id","Datacenter":"some-dc-id"}}}`),
expectFn: func(t *testing.T, event Event) {
require.Equal(t, TopicNode, event.Topic)
must.Eq(t, TopicNode, event.Topic)
n, err := event.Node()
require.NoError(t, err)
require.Equal(t, &Node{
must.NoError(t, err)
must.Eq(t, &Node{
ID: "some-id",
Datacenter: "some-dc-id",
}, n)
@@ -307,12 +303,12 @@ func TestEventStream_PayloadValueHelpers(t *testing.T) {
desc: "service",
input: []byte(`{"Topic": "Service", "Payload": {"Service":{"ID":"some-service-id","Namespace":"some-service-namespace-id","Datacenter":"us-east-1a"}}}`),
expectFn: func(t *testing.T, event Event) {
require.Equal(t, TopicService, event.Topic)
must.Eq(t, TopicService, event.Topic)
a, err := event.Service()
require.NoError(t, err)
require.Equal(t, "us-east-1a", a.Datacenter)
require.Equal(t, "some-service-id", a.ID)
require.Equal(t, "some-service-namespace-id", a.Namespace)
must.NoError(t, err)
must.Eq(t, "us-east-1a", a.Datacenter)
must.Eq(t, "some-service-id", a.ID)
must.Eq(t, "some-service-namespace-id", a.Namespace)
},
},
}
@@ -321,7 +317,7 @@ func TestEventStream_PayloadValueHelpers(t *testing.T) {
t.Run(tc.desc, func(t *testing.T) {
var out Event
err := json.Unmarshal(tc.input, &out)
require.NoError(t, err)
must.NoError(t, err)
tc.expectFn(t, out)
})
}


@@ -5,14 +5,12 @@ import (
"errors"
"fmt"
"io"
"reflect"
"strings"
"testing"
"time"
"github.com/docker/go-units"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/shoenig/test"
"github.com/shoenig/test/must"
"github.com/shoenig/test/wait"
)
@@ -129,7 +127,7 @@ func TestFS_Logs(t *testing.T) {
}
// Check length
test.Eq(t, input.Len(), result.Len())
must.Eq(t, input.Len(), result.Len())
// Check complete ordering
for i := 0; i < lines; i++ {
@@ -142,6 +140,7 @@ func TestFS_Logs(t *testing.T) {
func TestFS_FrameReader(t *testing.T) {
testutil.Parallel(t)
// Create a channel of the frames and a cancel channel
framesCh := make(chan *StreamFrame, 3)
errCh := make(chan error)
@@ -176,12 +175,8 @@ func TestFS_FrameReader(t *testing.T) {
p := make([]byte, 12)
n, err := r.Read(p[:5])
if err != nil {
t.Fatalf("Read failed: %v", err)
}
if off := r.Offset(); off != n {
t.Fatalf("unexpected read bytes: got %v; wanted %v", n, off)
}
must.NoError(t, err)
must.Eq(t, n, r.Offset())
off := n
for {
@@ -190,24 +185,16 @@ func TestFS_FrameReader(t *testing.T) {
if err == io.EOF {
break
}
t.Fatalf("Read failed: %v", err)
must.NoError(t, err)
}
off += n
}
if !reflect.DeepEqual(p, expected) {
t.Fatalf("read %q, wanted %q", string(p), string(expected))
}
if err := r.Close(); err != nil {
t.Fatalf("Close() failed: %v", err)
}
if _, ok := <-cancelCh; ok {
t.Fatalf("Close() didn't close cancel channel")
}
if len(expected) != r.Offset() {
t.Fatalf("offset %d, wanted %d", r.Offset(), len(expected))
}
must.Eq(t, expected, p)
must.NoError(t, r.Close())
_, ok := <-cancelCh
must.False(t, ok)
must.Eq(t, len(expected), r.Offset())
}
func TestFS_FrameReader_Unblock(t *testing.T) {
@@ -224,13 +211,8 @@ func TestFS_FrameReader_Unblock(t *testing.T) {
p := make([]byte, 12)
n, err := r.Read(p)
if err != nil {
t.Fatalf("Read failed: %v", err)
}
if n != 0 {
t.Fatalf("should have unblocked")
}
must.NoError(t, err)
must.Zero(t, n)
// Unset the unblock
r.SetUnblockTime(0)
@@ -243,7 +225,7 @@ func TestFS_FrameReader_Unblock(t *testing.T) {
select {
case <-resultCh:
t.Fatalf("shouldn't have unblocked")
must.Unreachable(t, must.Sprint("must not have unblocked"))
case <-time.After(300 * time.Millisecond):
}
}
@@ -266,7 +248,5 @@ func TestFS_FrameReader_Error(t *testing.T) {
p := make([]byte, 12)
_, err := r.Read(p)
if err == nil || !strings.Contains(err.Error(), expected.Error()) {
t.Fatalf("bad error: %v", err)
}
must.ErrorIs(t, err, expected)
}


@@ -8,20 +8,13 @@ require (
github.com/hashicorp/cronexpr v1.1.1
github.com/hashicorp/go-cleanhttp v0.5.2
github.com/hashicorp/go-rootcerts v1.0.2
github.com/kr/pretty v0.3.1
github.com/mitchellh/go-testing-interface v1.14.1
github.com/mitchellh/mapstructure v1.5.0
github.com/shoenig/test v0.5.2
github.com/stretchr/testify v1.8.1
)
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
github.com/stretchr/testify v1.8.1 // indirect
)
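
With testify and pretty gone, the module's direct test-only dependency reduces to shoenig/test (the testutil package is internal to the repo, not a module dependency). For orientation, a minimal test file under the new layout would look roughly like the sketch below; the file and test name are hypothetical, and makeClient and Status().Leader() are simply borrowed from the status test later in this diff.

package api

import (
	"testing"

	"github.com/hashicorp/nomad/api/internal/testutil"
	"github.com/shoenig/test/must"
)

// Hypothetical skeleton, not part of the commit: the only assertion and
// test-helper imports left after the dependency purge.
func TestSkeleton_Example(t *testing.T) {
	testutil.Parallel(t)

	c, s := makeClient(t, nil, nil)
	defer s.Stop()

	leader, err := c.Status().Leader()
	must.NoError(t, err)
	must.NotEq(t, "", leader)
}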


@@ -1,4 +1,3 @@
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -14,21 +13,14 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/shoenig/test v0.5.2 h1:ELZ7qZ/6CPrT71PXrSe2TFzLs4/cGCqqU5lZ5RhZ+B8=
github.com/shoenig/test v0.5.2/go.mod h1:xYtyGBC5Q3kzCNyJg/SjgNpfAa2kvmgA0i5+lQso8x0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -39,8 +31,6 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


@@ -14,14 +14,15 @@ import (
"testing/iotest"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
func TestChecksumValidatingReader(t *testing.T) {
testutil.Parallel(t)
data := make([]byte, 4096)
_, err := rand.Read(data)
require.NoError(t, err)
must.NoError(t, err)
cases := []struct {
algo string
@@ -33,23 +34,23 @@ func TestChecksumValidatingReader(t *testing.T) {
for _, c := range cases {
t.Run("valid: "+c.algo, func(t *testing.T) {
_, err := c.hash.Write(data)
require.NoError(t, err)
_, err = c.hash.Write(data)
must.NoError(t, err)
checksum := c.hash.Sum(nil)
digest := c.algo + "=" + base64.StdEncoding.EncodeToString(checksum)
r := iotest.HalfReader(bytes.NewReader(data))
cr, err := newChecksumValidatingReader(ioutil.NopCloser(r), digest)
require.NoError(t, err)
must.NoError(t, err)
_, err = io.Copy(ioutil.Discard, cr)
require.NoError(t, err)
must.NoError(t, err)
})
t.Run("invalid: "+c.algo, func(t *testing.T) {
_, err := c.hash.Write(data)
require.NoError(t, err)
_, err = c.hash.Write(data)
must.NoError(t, err)
checksum := c.hash.Sum(nil)
// mess up checksum
@@ -58,32 +59,31 @@ func TestChecksumValidatingReader(t *testing.T) {
r := iotest.HalfReader(bytes.NewReader(data))
cr, err := newChecksumValidatingReader(ioutil.NopCloser(r), digest)
require.NoError(t, err)
must.NoError(t, err)
_, err = io.Copy(ioutil.Discard, cr)
require.Error(t, err)
require.Equal(t, errMismatchChecksum, err)
must.ErrorIs(t, err, errMismatchChecksum)
})
}
}
func TestChecksumValidatingReader_PropagatesError(t *testing.T) {
testutil.Parallel(t)
pr, pw := io.Pipe()
defer pr.Close()
defer pw.Close()
defer func() { _ = pr.Close() }()
defer func() { _ = pw.Close() }()
expectedErr := errors.New("some error")
go func() {
pw.Write([]byte("some input"))
pw.CloseWithError(expectedErr)
_, _ = pw.Write([]byte("some input"))
_ = pw.CloseWithError(expectedErr)
}()
cr, err := newChecksumValidatingReader(pr, "sha-256=aaaa")
require.NoError(t, err)
must.NoError(t, err)
_, err = io.Copy(ioutil.Discard, cr)
require.Error(t, err)
require.Equal(t, expectedErr, err)
must.ErrorIs(t, err, expectedErr)
}
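
One detail worth pulling out of the hunk above: the digest handed to newChecksumValidatingReader is the algorithm label, an equals sign, and the base64 encoding of the raw sum. Below is a sketch of the happy path, assuming the "sha-256" label seen in the error-propagation test and the standard bytes, crypto/sha256, encoding/base64, io, and io/ioutil imports; the helper name is invented and the snippet is not part of the commit.

// Hypothetical helper, not in the commit: builds a valid "algo=base64(sum)"
// digest and streams the data through the validating reader.
func readWithValidSHA256(t *testing.T, data []byte) {
	h := sha256.New()
	_, _ = h.Write(data)
	digest := "sha-256=" + base64.StdEncoding.EncodeToString(h.Sum(nil))

	cr, err := newChecksumValidatingReader(ioutil.NopCloser(bytes.NewReader(data)), digest)
	must.NoError(t, err)

	// The checksum matches, so copying to the end succeeds.
	_, err = io.Copy(ioutil.Discard, cr)
	must.NoError(t, err)
}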

File diff suppressed because it is too large.


@@ -3,13 +3,13 @@ package api
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/shoenig/test/must"
)
func TestKeyring_CRUD(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
@@ -17,33 +17,33 @@ func TestKeyring_CRUD(t *testing.T) {
// Find the bootstrap key
keys, qm, err := kr.List(nil)
require.NoError(t, err)
must.NoError(t, err)
assertQueryMeta(t, qm)
require.Len(t, keys, 1)
must.Len(t, 1, keys)
oldKeyID := keys[0].KeyID
// Create a key by requesting a rotation
key, wm, err := kr.Rotate(nil, nil)
require.NoError(t, err)
require.NotNil(t, key)
must.NoError(t, err)
must.NotNil(t, key)
assertWriteMeta(t, wm)
// Read all the keys
keys, qm, err = kr.List(&QueryOptions{WaitIndex: key.CreateIndex})
require.NoError(t, err)
must.NoError(t, err)
assertQueryMeta(t, qm)
require.Len(t, keys, 2)
must.Len(t, 2, keys)
// Delete the old key
wm, err = kr.Delete(&KeyringDeleteOptions{KeyID: oldKeyID}, nil)
require.NoError(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Read all the keys back
keys, qm, err = kr.List(&QueryOptions{WaitIndex: key.CreateIndex})
require.NoError(t, err)
must.NoError(t, err)
assertQueryMeta(t, qm)
require.Len(t, keys, 1)
require.Equal(t, key.KeyID, keys[0].KeyID)
require.Equal(t, RootKeyState(RootKeyStateActive), keys[0].State)
must.Len(t, 1, keys)
must.Eq(t, key.KeyID, keys[0].KeyID)
must.Eq(t, RootKeyState(RootKeyStateActive), keys[0].State)
}


@@ -1,5 +1,4 @@
//go:build ent
// +build ent
package api
@@ -7,11 +6,12 @@ import (
"testing"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
func TestOperator_LicenseGet(t *testing.T) {
testutil.Parallel(t)
c, s, _ := makeACLClient(t, nil, nil)
defer s.Stop()
@@ -19,11 +19,10 @@ func TestOperator_LicenseGet(t *testing.T) {
// Make authenticated request.
_, _, err := operator.LicenseGet(nil)
require.NoError(t, err)
must.NoError(t, err)
// Make unauthenticated request.
c.SetSecretID("")
_, _, err = operator.LicenseGet(nil)
require.Error(t, err)
require.Contains(t, err.Error(), "403")
must.ErrorContains(t, err, "403")
}


@@ -4,11 +4,12 @@ import (
"testing"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
func TestOperator_MetricsSummary(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
c.DevMode = true
})
@@ -22,18 +23,19 @@ func TestOperator_MetricsSummary(t *testing.T) {
}
metrics, qm, err := operator.MetricsSummary(qo)
require.NoError(t, err)
require.NotNil(t, metrics)
require.NotNil(t, qm)
require.NotNil(t, metrics.Timestamp) // should always get a TimeStamp
require.GreaterOrEqual(t, len(metrics.Points), 0) // may not have points yet
require.GreaterOrEqual(t, len(metrics.Gauges), 1) // should have at least 1 gauge
require.GreaterOrEqual(t, len(metrics.Counters), 1) // should have at least 1 counter
require.GreaterOrEqual(t, len(metrics.Samples), 1) // should have at least 1 sample
must.NoError(t, err)
must.NotNil(t, metrics)
must.NotNil(t, qm)
must.NotNil(t, metrics.Timestamp) // should always get a TimeStamp
must.SliceEmpty(t, metrics.Points) // no data points are recorded yet at this stage
must.SliceNotEmpty(t, metrics.Gauges) // should have at least 1 gauge
must.SliceNotEmpty(t, metrics.Counters) // should have at least 1 counter
must.SliceNotEmpty(t, metrics.Samples) // should have at least 1 sample
}
func TestOperator_Metrics_Prometheus(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
c.DevMode = true
c.Telemetry = &testutil.Telemetry{PrometheusMetrics: true}
@@ -48,8 +50,8 @@ func TestOperator_Metrics_Prometheus(t *testing.T) {
}
metrics, err := operator.Metrics(qo)
require.NoError(t, err)
require.NotNil(t, metrics)
must.NoError(t, err)
must.NotNil(t, metrics)
metricString := string(metrics[:])
require.Containsf(t, metricString, "# HELP", "expected Prometheus format containing \"# HELP\", got: \n%s", metricString)
must.StrContains(t, metricString, "# HELP")
}


@@ -1,32 +1,29 @@
package api
import (
"strings"
"testing"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
func TestOperator_RaftGetConfiguration(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
operator := c.Operator()
out, err := operator.RaftGetConfiguration(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(out.Servers) != 1 ||
!out.Servers[0].Leader ||
!out.Servers[0].Voter {
t.Fatalf("bad: %v", out)
}
must.NoError(t, err)
must.Len(t, 1, out.Servers)
must.True(t, out.Servers[0].Leader)
must.True(t, out.Servers[0].Voter)
}
func TestOperator_RaftRemovePeerByAddress(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
@@ -34,39 +31,35 @@ func TestOperator_RaftRemovePeerByAddress(t *testing.T) {
// through.
operator := c.Operator()
err := operator.RaftRemovePeerByAddress("nope", nil)
if err == nil || !strings.Contains(err.Error(),
"address \"nope\" was not found in the Raft configuration") {
t.Fatalf("err: %v", err)
}
must.ErrorContains(t, err, `address "nope" was not found in the Raft configuration`)
}
func TestOperator_RaftRemovePeerByID(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
// If we get this error, it proves we sent the address all the way
// through.
// If we get this error, it proves we sent the ID all the way through.
operator := c.Operator()
err := operator.RaftRemovePeerByID("nope", nil)
if err == nil || !strings.Contains(err.Error(),
"id \"nope\" was not found in the Raft configuration") {
t.Fatalf("err: %v", err)
}
must.ErrorContains(t, err, `id "nope" was not found in the Raft configuration`)
}
func TestOperator_SchedulerGetConfiguration(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
schedulerConfig, _, err := c.Operator().SchedulerGetConfiguration(nil)
require.Nil(t, err)
require.NotEmpty(t, schedulerConfig)
must.NoError(t, err)
must.NotNil(t, schedulerConfig)
}
func TestOperator_SchedulerSetConfiguration(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
@@ -84,16 +77,15 @@ func TestOperator_SchedulerSetConfiguration(t *testing.T) {
}
schedulerConfigUpdateResp, _, err := c.Operator().SchedulerSetConfiguration(&newSchedulerConfig, nil)
require.Nil(t, err)
require.True(t, schedulerConfigUpdateResp.Updated)
must.NoError(t, err)
must.True(t, schedulerConfigUpdateResp.Updated)
// We can't exactly predict the query meta responses, so we test fields
// individually.
// We can't exactly predict the query meta responses, so we test fields individually.
schedulerConfig, _, err := c.Operator().SchedulerGetConfiguration(nil)
require.Nil(t, err)
require.Equal(t, schedulerConfig.SchedulerConfig.SchedulerAlgorithm, SchedulerAlgorithmSpread)
require.True(t, schedulerConfig.SchedulerConfig.PauseEvalBroker)
require.True(t, schedulerConfig.SchedulerConfig.RejectJobRegistration)
require.True(t, schedulerConfig.SchedulerConfig.MemoryOversubscriptionEnabled)
require.Equal(t, newSchedulerConfig.PreemptionConfig, schedulerConfig.SchedulerConfig.PreemptionConfig)
must.NoError(t, err)
must.Eq(t, SchedulerAlgorithmSpread, schedulerConfig.SchedulerConfig.SchedulerAlgorithm)
must.True(t, schedulerConfig.SchedulerConfig.PauseEvalBroker)
must.True(t, schedulerConfig.SchedulerConfig.RejectJobRegistration)
must.True(t, schedulerConfig.SchedulerConfig.MemoryOversubscriptionEnabled)
must.Eq(t, schedulerConfig.SchedulerConfig.PreemptionConfig, newSchedulerConfig.PreemptionConfig)
}


@@ -1,5 +1,4 @@
//go:build ent
// +build ent
package api
@@ -7,12 +6,12 @@ import (
"testing"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/assert"
"github.com/shoenig/test/must"
)
func TestQuotas_Register(t *testing.T) {
testutil.Parallel(t)
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
quotas := c.Quotas()
@@ -20,20 +19,20 @@ func TestQuotas_Register(t *testing.T) {
// Create a quota spec and register it
qs := testQuotaSpec()
wm, err := quotas.Register(qs, nil)
assert.Nil(err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Query the specs back out again
resp, qm, err := quotas.List(nil)
assert.Nil(err)
must.NoError(t, err)
assertQueryMeta(t, qm)
assert.Len(resp, 1)
assert.Equal(qs.Name, resp[0].Name)
must.Len(t, 1, resp)
must.Eq(t, qs.Name, resp[0].Name)
}
func TestQuotas_Register_Invalid(t *testing.T) {
testutil.Parallel(t)
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
quotas := c.Quotas()
@@ -42,64 +41,62 @@ func TestQuotas_Register_Invalid(t *testing.T) {
qs := testQuotaSpec()
qs.Name = "*"
_, err := quotas.Register(qs, nil)
assert.NotNil(err)
must.Error(t, err)
}
func TestQuotas_Info(t *testing.T) {
testutil.Parallel(t)
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
quotas := c.Quotas()
// Trying to retrieve a quota spec before it exists returns an error
_, _, err := quotas.Info("foo", nil)
assert.NotNil(err)
assert.Contains(err.Error(), "not found")
must.ErrorContains(t, err, "not found")
// Register the quota
qs := testQuotaSpec()
wm, err := quotas.Register(qs, nil)
assert.Nil(err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Query the quota again and ensure it exists
result, qm, err := quotas.Info(qs.Name, nil)
assert.Nil(err)
must.NoError(t, err)
assertQueryMeta(t, qm)
assert.NotNil(result)
assert.Equal(qs.Name, result.Name)
must.NotNil(t, result)
must.Eq(t, qs.Name, result.Name)
}
func TestQuotas_Usage(t *testing.T) {
testutil.Parallel(t)
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
quotas := c.Quotas()
// Trying to retrieve a quota spec before it exists returns an error
_, _, err := quotas.Usage("foo", nil)
assert.NotNil(err)
assert.Contains(err.Error(), "not found")
must.ErrorContains(t, err, "not found")
// Register the quota
qs := testQuotaSpec()
wm, err := quotas.Register(qs, nil)
assert.Nil(err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Query the quota usage and ensure it exists
result, qm, err := quotas.Usage(qs.Name, nil)
assert.Nil(err)
must.NoError(t, err)
assertQueryMeta(t, qm)
assert.NotNil(result)
assert.Equal(qs.Name, result.Name)
must.NotNil(t, result)
must.Eq(t, qs.Name, result.Name)
}
func TestQuotas_Delete(t *testing.T) {
testutil.Parallel(t)
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
quotas := c.Quotas()
@@ -107,31 +104,31 @@ func TestQuotas_Delete(t *testing.T) {
// Create a quota and register it
qs := testQuotaSpec()
wm, err := quotas.Register(qs, nil)
assert.Nil(err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Query the quota back out again
resp, qm, err := quotas.List(nil)
assert.Nil(err)
must.NoError(t, err)
assertQueryMeta(t, qm)
assert.Len(resp, 1)
assert.Equal(qs.Name, resp[0].Name)
must.Len(t, 1, resp)
must.Eq(t, qs.Name, resp[0].Name)
// Delete the quota
wm, err = quotas.Delete(qs.Name, nil)
assert.Nil(err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Query the quotas back out again
resp, qm, err = quotas.List(nil)
assert.Nil(err)
must.NoError(t, err)
assertQueryMeta(t, qm)
assert.Len(resp, 0)
must.SliceEmpty(t, resp)
}
func TestQuotas_List(t *testing.T) {
testutil.Parallel(t)
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
quotas := c.Quotas()
@@ -142,36 +139,36 @@ func TestQuotas_List(t *testing.T) {
qs1.Name = "fooaaa"
qs2.Name = "foobbb"
wm, err := quotas.Register(qs1, nil)
assert.Nil(err)
must.NoError(t, err)
assertWriteMeta(t, wm)
wm, err = quotas.Register(qs2, nil)
assert.Nil(err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Query the quotas
resp, qm, err := quotas.List(nil)
assert.Nil(err)
must.NoError(t, err)
assertQueryMeta(t, qm)
assert.Len(resp, 2)
must.Len(t, 2, resp)
// Query the quotas using a prefix
resp, qm, err = quotas.PrefixList("foo", nil)
assert.Nil(err)
must.NoError(t, err)
assertQueryMeta(t, qm)
assert.Len(resp, 2)
must.Len(t, 2, resp)
// Query the quotas using a prefix
resp, qm, err = quotas.PrefixList("foob", nil)
assert.Nil(err)
must.NoError(t, err)
assertQueryMeta(t, qm)
assert.Len(resp, 1)
assert.Equal(qs2.Name, resp[0].Name)
must.Len(t, 1, resp)
must.Eq(t, qs2.Name, resp[0].Name)
}
func TestQuotas_ListUsages(t *testing.T) {
testutil.Parallel(t)
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
quotas := c.Quotas()
@@ -182,29 +179,29 @@ func TestQuotas_ListUsages(t *testing.T) {
qs1.Name = "fooaaa"
qs2.Name = "foobbb"
wm, err := quotas.Register(qs1, nil)
assert.Nil(err)
must.NoError(t, err)
assertWriteMeta(t, wm)
wm, err = quotas.Register(qs2, nil)
assert.Nil(err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Query the quotas
resp, qm, err := quotas.ListUsage(nil)
assert.Nil(err)
must.NoError(t, err)
assertQueryMeta(t, qm)
assert.Len(resp, 2)
must.Len(t, 2, resp)
// Query the quotas using a prefix
resp, qm, err = quotas.PrefixListUsage("foo", nil)
assert.Nil(err)
must.NoError(t, err)
assertQueryMeta(t, qm)
assert.Len(resp, 2)
must.Len(t, 2, resp)
// Query the quotas using a prefix
resp, qm, err = quotas.PrefixListUsage("foob", nil)
assert.Nil(err)
must.NoError(t, err)
assertQueryMeta(t, qm)
assert.Len(resp, 1)
assert.Equal(qs2.Name, resp[0].Name)
must.Len(t, 1, resp)
must.Eq(t, qs2.Name, resp[0].Name)
}


@@ -1,11 +1,10 @@
package api
import (
"reflect"
"testing"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/kr/pretty"
"github.com/shoenig/test/must"
)
func TestResources_Canonicalize(t *testing.T) {
@@ -49,9 +48,7 @@ func TestResources_Canonicalize(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
tc.input.Canonicalize()
if !reflect.DeepEqual(tc.input, tc.expected) {
t.Fatalf("Name: %v, Diffs:\n%v", tc.name, pretty.Diff(tc.expected, tc.input))
}
must.Eq(t, tc.expected, tc.input)
})
}
}


@@ -4,12 +4,11 @@ import (
"testing"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
func TestScalingPolicies_ListPolicies(t *testing.T) {
testutil.Parallel(t)
require := require.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
@@ -18,8 +17,8 @@ func TestScalingPolicies_ListPolicies(t *testing.T) {
// Check that we don't have any scaling policies before registering a job that has one
policies, _, err := scaling.ListPolicies(nil)
require.NoError(err)
require.Empty(policies, "expected 0 scaling policies, got: %d", len(policies))
must.NoError(t, err)
must.SliceEmpty(t, policies)
// Register a job with a scaling policy
job := testJob()
@@ -27,14 +26,12 @@ func TestScalingPolicies_ListPolicies(t *testing.T) {
Max: pointerOf(int64(100)),
}
_, _, err = jobs.Register(job, nil)
require.NoError(err)
must.NoError(t, err)
// Check that we have a scaling policy now
policies, _, err = scaling.ListPolicies(nil)
require.NoError(err)
if len(policies) != 1 {
t.Fatalf("expected 1 scaling policy, got: %d", len(policies))
}
must.NoError(t, err)
must.Len(t, 1, policies)
policy := policies[0]
@@ -43,21 +40,20 @@ func TestScalingPolicies_ListPolicies(t *testing.T) {
if job.Namespace != nil && *job.Namespace != "" {
namespace = *job.Namespace
}
require.Equal(policy.Target["Namespace"], namespace)
must.Eq(t, policy.Target["Namespace"], namespace)
// Check that the scaling policy references the right job
require.Equal(policy.Target["Job"], *job.ID)
must.Eq(t, policy.Target["Job"], *job.ID)
// Check that the scaling policy references the right group
require.Equal(policy.Target["Group"], *job.TaskGroups[0].Name)
must.Eq(t, policy.Target["Group"], *job.TaskGroups[0].Name)
// Check that the scaling policy has the right type
require.Equal(ScalingPolicyTypeHorizontal, policy.Type)
must.Eq(t, ScalingPolicyTypeHorizontal, policy.Type)
}
func TestScalingPolicies_GetPolicy(t *testing.T) {
testutil.Parallel(t)
require := require.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
@@ -66,13 +62,11 @@ func TestScalingPolicies_GetPolicy(t *testing.T) {
// Empty ID should return 404
_, _, err := scaling.GetPolicy("", nil)
require.Error(err)
require.Containsf(err.Error(), "404", "expected 404 error, got: %s", err.Error())
must.ErrorContains(t, err, "404")
// Inexistent ID should return 404
_, _, err = scaling.GetPolicy("i-dont-exist", nil)
require.Error(err)
require.Containsf(err.Error(), "404", "expected 404 error, got: %s", err.Error())
// Non-existent ID should return 404
_, _, err = scaling.GetPolicy("i-do-not-exist", nil)
must.ErrorContains(t, err, "404")
// Register a job with a scaling policy
job := testJob()
@@ -86,12 +80,12 @@ func TestScalingPolicies_GetPolicy(t *testing.T) {
}
job.TaskGroups[0].Scaling = policy
_, _, err = jobs.Register(job, nil)
require.NoError(err)
must.NoError(t, err)
// Find newly created scaling policy ID
var policyID string
policies, _, err := scaling.ListPolicies(nil)
require.NoError(err)
must.NoError(t, err)
for _, p := range policies {
if p.Target["Job"] == *job.ID {
policyID = p.ID
@@ -104,7 +98,7 @@ func TestScalingPolicies_GetPolicy(t *testing.T) {
// Fetch scaling policy
resp, _, err := scaling.GetPolicy(policyID, nil)
require.NoError(err)
must.NoError(t, err)
// Check that the scaling policy fields match
namespace := DefaultNamespace
@@ -116,10 +110,10 @@ func TestScalingPolicies_GetPolicy(t *testing.T) {
"Job": *job.ID,
"Group": *job.TaskGroups[0].Name,
}
require.Equal(expectedTarget, resp.Target)
require.Equal(policy.Policy, resp.Policy)
require.Equal(policy.Enabled, resp.Enabled)
require.Equal(*policy.Min, *resp.Min)
require.Equal(policy.Max, resp.Max)
require.Equal(ScalingPolicyTypeHorizontal, resp.Type)
must.Eq(t, expectedTarget, resp.Target)
must.Eq(t, policy.Policy, resp.Policy)
must.Eq(t, policy.Enabled, resp.Enabled)
must.Eq(t, *policy.Min, *resp.Min)
must.Eq(t, policy.Max, resp.Max)
must.Eq(t, ScalingPolicyTypeHorizontal, resp.Type)
}


@@ -5,7 +5,7 @@ import (
"github.com/hashicorp/nomad/api/contexts"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
func TestSearch_PrefixSearch(t *testing.T) {
@@ -16,18 +16,18 @@ func TestSearch_PrefixSearch(t *testing.T) {
job := testJob()
_, _, err := c.Jobs().Register(job, nil)
require.NoError(t, err)
must.NoError(t, err)
id := *job.ID
prefix := id[:len(id)-2]
resp, qm, err := c.Search().PrefixSearch(prefix, contexts.Jobs, nil)
require.NoError(t, err)
require.NotNil(t, qm)
require.NotNil(t, resp)
must.NoError(t, err)
must.NotNil(t, qm)
must.NotNil(t, resp)
jobMatches := resp.Matches[contexts.Jobs]
require.Len(t, jobMatches, 1)
require.Equal(t, id, jobMatches[0])
must.Len(t, 1, jobMatches)
must.Eq(t, id, jobMatches[0])
}
func TestSearch_FuzzySearch(t *testing.T) {
@@ -38,17 +38,15 @@ func TestSearch_FuzzySearch(t *testing.T) {
job := testJob()
_, _, err := c.Jobs().Register(job, nil)
require.NoError(t, err)
must.NoError(t, err)
resp, qm, err := c.Search().FuzzySearch("bin", contexts.All, nil)
require.NoError(t, err)
require.NotNil(t, qm)
require.NotNil(t, resp)
must.NoError(t, err)
must.NotNil(t, qm)
must.NotNil(t, resp)
commandMatches := resp.Matches[contexts.Commands]
require.Len(t, commandMatches, 1)
require.Equal(t, "/bin/sleep", commandMatches[0].ID)
require.Equal(t, []string{
"default", *job.ID, "group1", "task1",
}, commandMatches[0].Scope)
must.Len(t, 1, commandMatches)
must.Eq(t, "/bin/sleep", commandMatches[0].ID)
must.Eq(t, []string{"default", *job.ID, "group1", "task1"}, commandMatches[0].Scope)
}


@@ -1,5 +1,4 @@
//go:build ent
// +build ent
package api
@@ -7,7 +6,7 @@ import (
"testing"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/assert"
"github.com/shoenig/test/must"
)
func TestSentinelPolicies_ListUpsert(t *testing.T) {
@@ -18,15 +17,9 @@ func TestSentinelPolicies_ListUpsert(t *testing.T) {
// Listing when nothing exists returns empty
result, qm, err := ap.List(nil)
if err != nil {
t.Fatalf("err: %s", err)
}
if qm.LastIndex != 1 {
t.Fatalf("bad index: %d", qm.LastIndex)
}
if n := len(result); n != 0 {
t.Fatalf("expected 0 policies, got: %d", n)
}
must.NoError(t, err)
must.Positive(t, qm.LastIndex)
must.SliceEmpty(t, result)
// Register a policy
policy := &SentinelPolicy{
@@ -37,22 +30,19 @@ func TestSentinelPolicies_ListUpsert(t *testing.T) {
Policy: "main = rule { true }",
}
wm, err := ap.Upsert(policy, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Check the list again
result, qm, err = ap.List(nil)
if err != nil {
t.Fatalf("err: %s", err)
}
must.NoError(t, err)
assertQueryMeta(t, qm)
if len(result) != 1 {
t.Fatalf("expected policy, got: %#v", result)
}
must.Len(t, 1, result)
}
func TestSentinelPolicies_Delete(t *testing.T) {
testutil.Parallel(t)
c, s, _ := makeACLClient(t, nil, nil)
defer s.Stop()
ap := c.SentinelPolicies()
@@ -66,27 +56,24 @@ func TestSentinelPolicies_Delete(t *testing.T) {
Policy: "main = rule { true } ",
}
wm, err := ap.Upsert(policy, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Delete the policy
wm, err = ap.Delete(policy.Name, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Check the list again
result, qm, err := ap.List(nil)
if err != nil {
t.Fatalf("err: %s", err)
}
must.NoError(t, err)
assertQueryMeta(t, qm)
if len(result) != 0 {
t.Fatalf("unexpected policy, got: %#v", result)
}
must.SliceEmpty(t, result)
}
func TestSentinelPolicies_Info(t *testing.T) {
testutil.Parallel(t)
c, s, _ := makeACLClient(t, nil, nil)
defer s.Stop()
ap := c.SentinelPolicies()
@@ -100,12 +87,12 @@ func TestSentinelPolicies_Info(t *testing.T) {
Policy: "main = rule { true }",
}
wm, err := ap.Upsert(policy, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertWriteMeta(t, wm)
// Query the policy
out, qm, err := ap.Info(policy.Name, nil)
assert.Nil(t, err)
must.NoError(t, err)
assertQueryMeta(t, qm)
assert.Equal(t, policy.Name, out.Name)
must.Eq(t, policy.Name, out.Name)
}


@@ -6,7 +6,7 @@ import (
"time"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
func TestServiceRegistrations_List(t *testing.T) {
@@ -33,13 +33,13 @@ func TestService_Canonicalize(t *testing.T) {
s.Canonicalize(task, tg, j)
require.Equal(t, fmt.Sprintf("%s-%s-%s", *j.Name, *tg.Name, task.Name), s.Name)
require.Equal(t, "auto", s.AddressMode)
require.Equal(t, OnUpdateRequireHealthy, s.OnUpdate)
require.Equal(t, ServiceProviderConsul, s.Provider)
require.Nil(t, s.Meta)
require.Nil(t, s.CanaryMeta)
require.Nil(t, s.TaggedAddresses)
must.Eq(t, fmt.Sprintf("%s-%s-%s", *j.Name, *tg.Name, task.Name), s.Name)
must.Eq(t, "auto", s.AddressMode)
must.Eq(t, OnUpdateRequireHealthy, s.OnUpdate)
must.Eq(t, ServiceProviderConsul, s.Provider)
must.Nil(t, s.Meta)
must.Nil(t, s.CanaryMeta)
must.Nil(t, s.TaggedAddresses)
}
func TestServiceCheck_Canonicalize(t *testing.T) {
@@ -57,8 +57,7 @@ func TestServiceCheck_Canonicalize(t *testing.T) {
}
s.Canonicalize(task, tg, j)
require.Equal(t, OnUpdateRequireHealthy, s.Checks[0].OnUpdate)
must.Eq(t, OnUpdateRequireHealthy, s.Checks[0].OnUpdate)
}
func TestService_Check_PassFail(t *testing.T) {
@@ -77,8 +76,8 @@ func TestService_Check_PassFail(t *testing.T) {
}
s.Canonicalize(task, tg, job)
require.Zero(t, s.Checks[0].SuccessBeforePassing)
require.Zero(t, s.Checks[0].FailuresBeforeCritical)
must.Zero(t, s.Checks[0].SuccessBeforePassing)
must.Zero(t, s.Checks[0].FailuresBeforeCritical)
})
t.Run("normal", func(t *testing.T) {
@@ -90,8 +89,8 @@ func TestService_Check_PassFail(t *testing.T) {
}
s.Canonicalize(task, tg, job)
require.Equal(t, 3, s.Checks[0].SuccessBeforePassing)
require.Equal(t, 4, s.Checks[0].FailuresBeforeCritical)
must.Eq(t, 3, s.Checks[0].SuccessBeforePassing)
must.Eq(t, 4, s.Checks[0].FailuresBeforeCritical)
})
}
@@ -132,17 +131,17 @@ func TestService_CheckRestart(t *testing.T) {
}
service.Canonicalize(task, tg, job)
require.Equal(t, service.Checks[0].CheckRestart.Limit, 22)
require.Equal(t, *service.Checks[0].CheckRestart.Grace, 22*time.Second)
require.True(t, service.Checks[0].CheckRestart.IgnoreWarnings)
must.Eq(t, 22, service.Checks[0].CheckRestart.Limit)
must.Eq(t, 22*time.Second, *service.Checks[0].CheckRestart.Grace)
must.True(t, service.Checks[0].CheckRestart.IgnoreWarnings)
require.Equal(t, service.Checks[1].CheckRestart.Limit, 33)
require.Equal(t, *service.Checks[1].CheckRestart.Grace, 33*time.Second)
require.True(t, service.Checks[1].CheckRestart.IgnoreWarnings)
must.Eq(t, 33, service.Checks[1].CheckRestart.Limit)
must.Eq(t, 33*time.Second, *service.Checks[1].CheckRestart.Grace)
must.True(t, service.Checks[1].CheckRestart.IgnoreWarnings)
require.Equal(t, service.Checks[2].CheckRestart.Limit, 11)
require.Equal(t, *service.Checks[2].CheckRestart.Grace, 11*time.Second)
require.True(t, service.Checks[2].CheckRestart.IgnoreWarnings)
must.Eq(t, 11, service.Checks[2].CheckRestart.Limit)
must.Eq(t, 11*time.Second, *service.Checks[2].CheckRestart.Grace)
must.True(t, service.Checks[2].CheckRestart.IgnoreWarnings)
}
func TestService_Connect_proxy_settings(t *testing.T) {
@@ -171,16 +170,15 @@ func TestService_Connect_proxy_settings(t *testing.T) {
service.Canonicalize(task, tg, job)
proxy := service.Connect.SidecarService.Proxy
require.Equal(t, proxy.Upstreams[0].DestinationName, "upstream")
require.Equal(t, proxy.Upstreams[0].LocalBindPort, 80)
require.Equal(t, proxy.Upstreams[0].Datacenter, "dc2")
require.Equal(t, proxy.Upstreams[0].LocalBindAddress, "127.0.0.2")
require.Equal(t, proxy.LocalServicePort, 8000)
must.Eq(t, "upstream", proxy.Upstreams[0].DestinationName)
must.Eq(t, 80, proxy.Upstreams[0].LocalBindPort)
must.Eq(t, "dc2", proxy.Upstreams[0].Datacenter)
must.Eq(t, "127.0.0.2", proxy.Upstreams[0].LocalBindAddress)
must.Eq(t, 8000, proxy.LocalServicePort)
}
func TestService_Tags(t *testing.T) {
testutil.Parallel(t)
r := require.New(t)
// canonicalize does not modify eto or tags
job := &Job{Name: pointerOf("job")}
@@ -193,7 +191,7 @@ func TestService_Tags(t *testing.T) {
}
service.Canonicalize(task, tg, job)
r.True(service.EnableTagOverride)
r.Equal([]string{"a", "b"}, service.Tags)
r.Equal([]string{"c", "d"}, service.CanaryTags)
must.True(t, service.EnableTagOverride)
must.Eq(t, []string{"a", "b"}, service.Tags)
must.Eq(t, []string{"c", "d"}, service.CanaryTags)
}


@@ -4,20 +4,18 @@ import (
"testing"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/shoenig/test/must"
)
func TestStatus_Leader(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
status := c.Status()
// Query for leader status should return a result
out, err := status.Leader()
if err != nil {
t.Fatalf("err: %s", err)
}
if out == "" {
t.Fatalf("expected leader, got: %q", out)
}
must.NoError(t, err)
must.NotEq(t, "", out)
}


@@ -4,14 +4,15 @@ import (
"testing"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/shoenig/test/must"
)
func TestSystem_GarbageCollect(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
e := c.System()
if err := e.GarbageCollect(); err != nil {
t.Fatal(err)
}
err := e.GarbageCollect()
must.NoError(t, err)
}


@@ -2,41 +2,35 @@ package api
import (
"path/filepath"
"reflect"
"testing"
"time"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
func TestTaskGroup_NewTaskGroup(t *testing.T) {
testutil.Parallel(t)
grp := NewTaskGroup("grp1", 2)
expect := &TaskGroup{
Name: pointerOf("grp1"),
Count: pointerOf(2),
}
if !reflect.DeepEqual(grp, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, grp)
}
must.Eq(t, expect, grp)
}
func TestTaskGroup_Constrain(t *testing.T) {
testutil.Parallel(t)
grp := NewTaskGroup("grp1", 1)
// Add a constraint to the group
out := grp.Constrain(NewConstraint("kernel.name", "=", "darwin"))
if n := len(grp.Constraints); n != 1 {
t.Fatalf("expected 1 constraint, got: %d", n)
}
must.Len(t, 1, grp.Constraints)
// Check that the group was returned
if out != grp {
t.Fatalf("expected: %#v, got: %#v", grp, out)
}
must.Eq(t, grp, out)
// Add a second constraint
grp.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
@@ -52,25 +46,20 @@ func TestTaskGroup_Constrain(t *testing.T) {
Operand: ">=",
},
}
if !reflect.DeepEqual(grp.Constraints, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, grp.Constraints)
}
must.Eq(t, expect, grp.Constraints)
}
func TestTaskGroup_AddAffinity(t *testing.T) {
testutil.Parallel(t)
grp := NewTaskGroup("grp1", 1)
// Add an affinity to the group
out := grp.AddAffinity(NewAffinity("kernel.version", "=", "4.6", 100))
if n := len(grp.Affinities); n != 1 {
t.Fatalf("expected 1 affinity, got: %d", n)
}
must.Len(t, 1, grp.Affinities)
// Check that the group was returned
if out != grp {
t.Fatalf("expected: %#v, got: %#v", grp, out)
}
must.Eq(t, grp, out)
// Add a second affinity
grp.AddAffinity(NewAffinity("${node.affinity}", "=", "dc2", 50))
@@ -88,36 +77,30 @@ func TestTaskGroup_AddAffinity(t *testing.T) {
Weight: pointerOf(int8(50)),
},
}
if !reflect.DeepEqual(grp.Affinities, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, grp.Constraints)
}
must.Eq(t, expect, grp.Affinities)
}
func TestTaskGroup_SetMeta(t *testing.T) {
testutil.Parallel(t)
grp := NewTaskGroup("grp1", 1)
// Initializes an empty map
out := grp.SetMeta("foo", "bar")
if grp.Meta == nil {
t.Fatalf("should be initialized")
}
must.NotNil(t, grp.Meta)
// Check that we returned the group
if out != grp {
t.Fatalf("expect: %#v, got: %#v", grp, out)
}
must.Eq(t, grp, out)
// Add a second meta k/v
grp.SetMeta("baz", "zip")
expect := map[string]string{"foo": "bar", "baz": "zip"}
if !reflect.DeepEqual(grp.Meta, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, grp.Meta)
}
must.Eq(t, expect, grp.Meta)
}
func TestTaskGroup_AddSpread(t *testing.T) {
testutil.Parallel(t)
grp := NewTaskGroup("grp1", 1)
// Create and add spread
@@ -125,14 +108,10 @@ func TestTaskGroup_AddSpread(t *testing.T) {
spread := NewSpread("${meta.rack}", 100, []*SpreadTarget{spreadTarget})
out := grp.AddSpread(spread)
if n := len(grp.Spreads); n != 1 {
t.Fatalf("expected 1 spread, got: %d", n)
}
must.Len(t, 1, grp.Spreads)
// Check that the group was returned
if out != grp {
t.Fatalf("expected: %#v, got: %#v", grp, out)
}
must.Eq(t, grp, out)
// Add a second spread
spreadTarget2 := NewSpreadTarget("dc1", 100)
@@ -162,25 +141,20 @@ func TestTaskGroup_AddSpread(t *testing.T) {
},
},
}
if !reflect.DeepEqual(grp.Spreads, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, grp.Spreads)
}
must.Eq(t, expect, grp.Spreads)
}
func TestTaskGroup_AddTask(t *testing.T) {
testutil.Parallel(t)
grp := NewTaskGroup("grp1", 1)
// Add the task to the task group
out := grp.AddTask(NewTask("task1", "java"))
if n := len(grp.Tasks); n != 1 {
t.Fatalf("expected 1 task, got: %d", n)
}
must.Len(t, 1, out.Tasks)
// Check that we returned the group
if out != grp {
t.Fatalf("expect: %#v, got: %#v", grp, out)
}
must.Eq(t, grp, out)
// Add a second task
grp.AddTask(NewTask("task2", "exec"))
@@ -194,71 +168,59 @@ func TestTaskGroup_AddTask(t *testing.T) {
Driver: "exec",
},
}
if !reflect.DeepEqual(grp.Tasks, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, grp.Tasks)
}
must.Eq(t, expect, grp.Tasks)
}
func TestTask_NewTask(t *testing.T) {
testutil.Parallel(t)
task := NewTask("task1", "exec")
expect := &Task{
Name: "task1",
Driver: "exec",
}
if !reflect.DeepEqual(task, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, task)
}
must.Eq(t, expect, task)
}
func TestTask_SetConfig(t *testing.T) {
testutil.Parallel(t)
task := NewTask("task1", "exec")
// Initializes an empty map
out := task.SetConfig("foo", "bar")
if task.Config == nil {
t.Fatalf("should be initialized")
}
must.NotNil(t, task.Config)
// Check that we returned the task
if out != task {
t.Fatalf("expect: %#v, got: %#v", task, out)
}
must.Eq(t, task, out)
// Set another config value
task.SetConfig("baz", "zip")
expect := map[string]interface{}{"foo": "bar", "baz": "zip"}
if !reflect.DeepEqual(task.Config, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, task.Config)
}
must.Eq(t, expect, task.Config)
}
func TestTask_SetMeta(t *testing.T) {
testutil.Parallel(t)
task := NewTask("task1", "exec")
// Initializes an empty map
out := task.SetMeta("foo", "bar")
if task.Meta == nil {
t.Fatalf("should be initialized")
}
must.NotNil(t, task.Meta)
// Check that we returned the task
if out != task {
t.Fatalf("expect: %#v, got: %#v", task, out)
}
must.Eq(t, task, out)
// Set another meta k/v
task.SetMeta("baz", "zip")
expect := map[string]string{"foo": "bar", "baz": "zip"}
if !reflect.DeepEqual(task.Meta, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, task.Meta)
}
must.Eq(t, expect, task.Meta)
}
func TestTask_Require(t *testing.T) {
testutil.Parallel(t)
task := NewTask("task1", "exec")
// Create some require resources
@@ -275,30 +237,23 @@ func TestTask_Require(t *testing.T) {
},
}
out := task.Require(resources)
if !reflect.DeepEqual(task.Resources, resources) {
t.Fatalf("expect: %#v, got: %#v", resources, task.Resources)
}
must.Eq(t, resources, task.Resources)
// Check that we returned the task
if out != task {
t.Fatalf("expect: %#v, got: %#v", task, out)
}
must.Eq(t, task, out)
}
func TestTask_Constrain(t *testing.T) {
testutil.Parallel(t)
task := NewTask("task1", "exec")
// Add a constraint to the task
out := task.Constrain(NewConstraint("kernel.name", "=", "darwin"))
if n := len(task.Constraints); n != 1 {
t.Fatalf("expected 1 constraint, got: %d", n)
}
must.Len(t, 1, task.Constraints)
// Check that the task was returned
if out != task {
t.Fatalf("expected: %#v, got: %#v", task, out)
}
must.Eq(t, task, out)
// Add a second constraint
task.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
@@ -314,24 +269,20 @@ func TestTask_Constrain(t *testing.T) {
Operand: ">=",
},
}
if !reflect.DeepEqual(task.Constraints, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, task.Constraints)
}
must.Eq(t, expect, task.Constraints)
}
func TestTask_AddAffinity(t *testing.T) {
testutil.Parallel(t)
task := NewTask("task1", "exec")
// Add an affinity to the task
out := task.AddAffinity(NewAffinity("kernel.version", "=", "4.6", 100))
require := require.New(t)
require.Len(out.Affinities, 1)
must.Len(t, 1, out.Affinities)
// Check that the task was returned
if out != task {
t.Fatalf("expected: %#v, got: %#v", task, out)
}
must.Eq(t, task, out)
// Add a second affinity
task.AddAffinity(NewAffinity("${node.datacenter}", "=", "dc2", 50))
@@ -349,13 +300,12 @@ func TestTask_AddAffinity(t *testing.T) {
Weight: pointerOf(int8(50)),
},
}
if !reflect.DeepEqual(task.Affinities, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, task.Affinities)
}
must.Eq(t, expect, task.Affinities)
}
func TestTask_Artifact(t *testing.T) {
testutil.Parallel(t)
a := TaskArtifact{
GetterSource: pointerOf("http://localhost/foo.txt"),
GetterMode: pointerOf("file"),
@@ -363,22 +313,24 @@ func TestTask_Artifact(t *testing.T) {
GetterOptions: make(map[string]string),
}
a.Canonicalize()
require.Equal(t, "file", *a.GetterMode)
require.Equal(t, "local/foo.txt", filepath.ToSlash(*a.RelativeDest))
require.Nil(t, a.GetterOptions)
require.Nil(t, a.GetterHeaders)
must.Eq(t, "file", *a.GetterMode)
must.Eq(t, "local/foo.txt", filepath.ToSlash(*a.RelativeDest))
must.Nil(t, a.GetterOptions)
must.Nil(t, a.GetterHeaders)
}
func TestTask_VolumeMount(t *testing.T) {
testutil.Parallel(t)
vm := &VolumeMount{}
vm := new(VolumeMount)
vm.Canonicalize()
require.NotNil(t, vm.PropagationMode)
require.Equal(t, *vm.PropagationMode, "private")
must.NotNil(t, vm.PropagationMode)
must.Eq(t, "private", *vm.PropagationMode)
}
func TestTask_Canonicalize_TaskLifecycle(t *testing.T) {
testutil.Parallel(t)
testCases := []struct {
name string
expected *TaskLifecycle
@@ -402,14 +354,14 @@ func TestTask_Canonicalize_TaskLifecycle(t *testing.T) {
ID: pointerOf("test"),
}
tc.task.Canonicalize(tg, j)
require.Equal(t, tc.expected, tc.task.Lifecycle)
must.Eq(t, tc.expected, tc.task.Lifecycle)
})
}
}
func TestTask_Template_WaitConfig_Canonicalize_and_Copy(t *testing.T) {
testutil.Parallel(t)
taskWithWait := func(wc *WaitConfig) *Task {
return &Task{
Templates: []*Template{
@@ -487,9 +439,9 @@ func TestTask_Template_WaitConfig_Canonicalize_and_Copy(t *testing.T) {
j := &Job{
ID: pointerOf("test"),
}
require.Equal(t, tc.copied, tc.task.Templates[0].Wait.Copy())
must.Eq(t, tc.copied, tc.task.Templates[0].Wait.Copy())
tc.task.Canonicalize(tg, j)
require.Equal(t, tc.canonicalized, tc.task.Templates[0].Wait)
must.Eq(t, tc.canonicalized, tc.task.Templates[0].Wait)
})
}
}
@@ -515,7 +467,7 @@ func TestTask_Canonicalize_Vault(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
tc.input.Canonicalize()
require.Equal(t, tc.expected, tc.input)
must.Eq(t, tc.expected, tc.input)
})
}
}
@@ -523,6 +475,7 @@ func TestTask_Canonicalize_Vault(t *testing.T) {
// Ensures no regression on https://github.com/hashicorp/nomad/issues/3132
func TestTaskGroup_Canonicalize_Update(t *testing.T) {
testutil.Parallel(t)
// Job with an Empty() Update
job := &Job{
ID: pointerOf("test"),
@@ -543,13 +496,12 @@ func TestTaskGroup_Canonicalize_Update(t *testing.T) {
Name: pointerOf("foo"),
}
tg.Canonicalize(job)
assert.NotNil(t, job.Update)
assert.Nil(t, tg.Update)
must.NotNil(t, job.Update)
must.Nil(t, tg.Update)
}
func TestTaskGroup_Canonicalize_Scaling(t *testing.T) {
testutil.Parallel(t)
require := require.New(t)
job := &Job{
ID: pointerOf("test"),
@@ -571,41 +523,42 @@ func TestTaskGroup_Canonicalize_Scaling(t *testing.T) {
// both nil => both == 1
tg.Canonicalize(job)
require.NotNil(tg.Count)
require.NotNil(tg.Scaling.Min)
require.EqualValues(1, *tg.Count)
require.EqualValues(*tg.Count, *tg.Scaling.Min)
must.Positive(t, *tg.Count)
must.NotNil(t, tg.Scaling.Min)
must.Eq(t, 1, *tg.Count)
must.Eq(t, int64(*tg.Count), *tg.Scaling.Min)
// count == nil => count = Scaling.Min
tg.Count = nil
tg.Scaling.Min = pointerOf(int64(5))
tg.Canonicalize(job)
require.NotNil(tg.Count)
require.NotNil(tg.Scaling.Min)
require.EqualValues(5, *tg.Count)
require.EqualValues(*tg.Count, *tg.Scaling.Min)
must.Positive(t, *tg.Count)
must.NotNil(t, tg.Scaling.Min)
must.Eq(t, 5, *tg.Count)
must.Eq(t, int64(*tg.Count), *tg.Scaling.Min)
// Scaling.Min == nil => Scaling.Min == count
tg.Count = pointerOf(5)
tg.Scaling.Min = nil
tg.Canonicalize(job)
require.NotNil(tg.Count)
require.NotNil(tg.Scaling.Min)
require.EqualValues(5, *tg.Scaling.Min)
require.EqualValues(*tg.Scaling.Min, *tg.Count)
must.Positive(t, *tg.Count)
must.NotNil(t, tg.Scaling.Min)
must.Eq(t, 5, *tg.Scaling.Min)
must.Eq(t, int64(*tg.Count), *tg.Scaling.Min)
// both present, both persisted
tg.Count = pointerOf(5)
tg.Scaling.Min = pointerOf(int64(1))
tg.Canonicalize(job)
require.NotNil(tg.Count)
require.NotNil(tg.Scaling.Min)
require.EqualValues(1, *tg.Scaling.Min)
require.EqualValues(5, *tg.Count)
must.Positive(t, *tg.Count)
must.NotNil(t, tg.Scaling.Min)
must.Eq(t, 1, *tg.Scaling.Min)
must.Eq(t, 5, *tg.Count)
}
func TestTaskGroup_Merge_Update(t *testing.T) {
testutil.Parallel(t)
job := &Job{
ID: pointerOf("test"),
Update: &UpdateStrategy{},
@@ -623,7 +576,7 @@ func TestTaskGroup_Merge_Update(t *testing.T) {
}
tg.Canonicalize(job)
require.Equal(t, &UpdateStrategy{
must.Eq(t, &UpdateStrategy{
AutoRevert: pointerOf(true),
AutoPromote: pointerOf(false),
Canary: pointerOf(5),
@@ -639,6 +592,7 @@ func TestTaskGroup_Merge_Update(t *testing.T) {
// Verifies that migrate strategy is merged correctly
func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
testutil.Parallel(t)
type testCase struct {
desc string
jobType string
@@ -784,7 +738,7 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
Migrate: tc.taskMigrate,
}
tg.Canonicalize(job)
assert.Equal(t, tc.expected, tg.Migrate)
must.Eq(t, tc.expected, tg.Migrate)
})
}
}
@@ -792,6 +746,7 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
// TestSpread_Canonicalize asserts that the spread stanza is canonicalized correctly
func TestSpread_Canonicalize(t *testing.T) {
testutil.Parallel(t)
job := &Job{
ID: pointerOf("test"),
Type: pointerOf("batch"),
@@ -837,7 +792,7 @@ func TestSpread_Canonicalize(t *testing.T) {
tg.Spreads = []*Spread{tc.spread}
tg.Canonicalize(job)
for _, spr := range tg.Spreads {
require.Equal(t, tc.expectedWeight, *spr.Weight)
must.Eq(t, tc.expectedWeight, *spr.Weight)
}
})
}
@@ -845,6 +800,7 @@ func TestSpread_Canonicalize(t *testing.T) {
func Test_NewDefaultReschedulePolicy(t *testing.T) {
testutil.Parallel(t)
testCases := []struct {
desc string
inputJobType string
@@ -903,13 +859,14 @@ func Test_NewDefaultReschedulePolicy(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
actual := NewDefaultReschedulePolicy(tc.inputJobType)
assert.Equal(t, tc.expected, actual)
must.Eq(t, tc.expected, actual)
})
}
}
func TestTaskGroup_Canonicalize_Consul(t *testing.T) {
testutil.Parallel(t)
t.Run("override job consul in group", func(t *testing.T) {
job := &Job{
ID: pointerOf("job"),
@@ -923,8 +880,8 @@ func TestTaskGroup_Canonicalize_Consul(t *testing.T) {
}
tg.Canonicalize(job)
require.Equal(t, "ns1", *job.ConsulNamespace)
require.Equal(t, "ns2", tg.Consul.Namespace)
must.Eq(t, "ns1", *job.ConsulNamespace)
must.Eq(t, "ns2", tg.Consul.Namespace)
})
t.Run("inherit job consul in group", func(t *testing.T) {
@@ -940,8 +897,8 @@ func TestTaskGroup_Canonicalize_Consul(t *testing.T) {
}
tg.Canonicalize(job)
require.Equal(t, "ns1", *job.ConsulNamespace)
require.Equal(t, "ns1", tg.Consul.Namespace)
must.Eq(t, "ns1", *job.ConsulNamespace)
must.Eq(t, "ns1", tg.Consul.Namespace)
})
t.Run("set in group only", func(t *testing.T) {
@@ -957,7 +914,7 @@ func TestTaskGroup_Canonicalize_Consul(t *testing.T) {
}
tg.Canonicalize(job)
require.Empty(t, job.ConsulNamespace)
require.Equal(t, "ns2", tg.Consul.Namespace)
must.Eq(t, "", *job.ConsulNamespace)
must.Eq(t, "ns2", tg.Consul.Namespace)
})
}
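A minimal sketch of the namespace resolution the three subtests above exercise, using a hypothetical resolveConsulNamespace helper rather than the real canonicalization code:
// Hypothetical helper: a group-level Consul namespace wins when set;
// otherwise the group inherits the job-level namespace (possibly empty).
func resolveConsulNamespace(jobNS *string, groupNS string) string {
	if groupNS != "" {
		return groupNS
	}
	if jobNS != nil {
		return *jobNS
	}
	return ""
}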

View File

@@ -5,11 +5,11 @@ import (
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/require"
)
func TestFormatRoundedFloat(t *testing.T) {
testutil.Parallel(t)
cases := []struct {
input float64
expected string
@@ -37,7 +37,7 @@ func TestFormatRoundedFloat(t *testing.T) {
}
for _, c := range cases {
require.Equal(t, c.expected, formatFloat(c.input, 3))
must.Eq(t, c.expected, formatFloat(c.input, 3))
}
}
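formatFloat is only exercised here with three decimal places; the sketch below is one plausible shape for such a helper (strconv-based, keeping at most maxPrec decimals) and is an assumption, not the package's actual implementation:
// Hypothetical sketch of a float-formatting helper that keeps at most maxPrec
// decimal places; the real formatFloat may round or trim differently.
// Requires the "strconv" and "strings" imports.
func formatFloatSketch(f float64, maxPrec int) string {
	s := strconv.FormatFloat(f, 'f', -1, 64)
	dot := strings.LastIndex(s, ".")
	if dot == -1 {
		return s
	}
	end := dot + maxPrec + 1
	if end > len(s) {
		end = len(s)
	}
	return s[:end]
}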

View File

@@ -1,16 +1,18 @@
package api
import (
"errors"
"fmt"
"testing"
"time"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
func TestVariables_SimpleCRUD(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
@@ -26,22 +28,19 @@ func TestVariables_SimpleCRUD(t *testing.T) {
sv2.Items["k2"] = "otherv2"
t.Run("1 fail create when no items", func(t *testing.T) {
_, _, err := nsv.Create(&Variable{Path: "bad/var"}, nil)
require.Error(t, err)
require.EqualError(t, err, "Unexpected response code: 400 (variable missing required Items object)")
must.ErrorContains(t, err, "Unexpected response code: 400 (variable missing required Items object)")
})
t.Run("2 create sv1", func(t *testing.T) {
get, _, err := nsv.Create(sv1, nil)
require.NoError(t, err)
require.NotNil(t, get)
require.NotZero(t, get.CreateIndex)
require.NotZero(t, get.CreateTime)
require.NotZero(t, get.ModifyIndex)
require.NotZero(t, get.ModifyTime)
require.Equal(t, sv1.Items, get.Items)
must.NoError(t, err)
must.NotNil(t, get)
must.Positive(t, get.CreateIndex)
must.Positive(t, get.CreateTime)
must.Positive(t, get.ModifyIndex)
must.Positive(t, get.ModifyTime)
must.Eq(t, sv1.Items, get.Items)
*sv1 = *get
})
@@ -49,7 +48,7 @@ func TestVariables_SimpleCRUD(t *testing.T) {
var err error
sv2, _, err = nsv.Create(sv2, nil)
require.NoError(t, err)
must.NoError(t, err)
})
// TODO: Need to prevent no-op modifications from happening server-side
@@ -67,59 +66,55 @@ func TestVariables_SimpleCRUD(t *testing.T) {
sv1.Items["new-hotness"] = "yeah!"
get, _, err := nsv.Update(sv1, nil)
require.NoError(t, err)
require.NotNil(t, get)
require.NotEqual(t, sv1.ModifyIndex, get.ModifyIndex, "ModifyIndex should change")
require.Equal(t, sv1.Items, get.Items)
must.NoError(t, err)
must.NotNil(t, get)
must.NotEq(t, sv1.ModifyIndex, get.ModifyIndex)
must.Eq(t, sv1.Items, get.Items)
*sv1 = *get
})
t.Run("5 list vars", func(t *testing.T) {
l, _, err := nsv.List(nil)
require.NoError(t, err)
require.Len(t, l, 2)
require.ElementsMatch(t, []*VariableMetadata{sv1.Metadata(), sv2.Metadata()}, l)
must.NoError(t, err)
must.Len(t, 2, l)
must.Eq(t, []*VariableMetadata{sv1.Metadata(), sv2.Metadata()}, l)
})
t.Run("5a list vars opts", func(t *testing.T) {
// Since there are two vars in the backend, we should
// get a NextToken with a page size of 1
l, qm, err := nsv.List(&QueryOptions{PerPage: 1})
require.NoError(t, err)
require.Len(t, l, 1)
require.Equal(t, sv1.Metadata(), l[0])
require.NotNil(t, qm.NextToken)
must.NoError(t, err)
must.Len(t, 1, l)
must.Eq(t, sv1.Metadata(), l[0])
must.NotNil(t, qm.NextToken)
})
t.Run("5b prefixlist", func(t *testing.T) {
l, _, err := nsv.PrefixList("my", nil)
require.NoError(t, err)
require.Len(t, l, 1)
require.Equal(t, sv1.Metadata(), l[0])
must.NoError(t, err)
must.Len(t, 1, l)
must.Eq(t, sv1.Metadata(), l[0])
})
t.Run("6 delete sv1", func(t *testing.T) {
_, err := nsv.Delete(sv1.Path, nil)
require.NoError(t, err)
must.NoError(t, err)
_, _, err = nsv.Read(sv1.Path, nil)
require.EqualError(t, err, ErrVariableNotFound)
must.ErrorContains(t, err, ErrVariableNotFound)
})
t.Run("7 list vars after delete", func(t *testing.T) {
l, _, err := nsv.List(nil)
require.NoError(t, err)
require.NotNil(t, l)
require.Len(t, l, 1)
must.NoError(t, err)
must.NotNil(t, l)
must.Len(t, 1, l)
})
}
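Subtest 5a above exercises PerPage/NextToken paging; the loop below is an illustrative sketch of walking every page of Variables().List with those QueryOptions fields (the function itself is not part of this package):
// Illustrative paging loop: keep requesting pages until the server stops
// returning a NextToken.
func listAllVariables(c *Client) ([]*VariableMetadata, error) {
	var out []*VariableMetadata
	opts := &QueryOptions{PerPage: 1}
	for {
		page, qm, err := c.Variables().List(opts)
		if err != nil {
			return nil, err
		}
		out = append(out, page...)
		if qm.NextToken == "" {
			return out, nil
		}
		opts.NextToken = qm.NextToken
	}
}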
func TestVariables_CRUDWithCAS(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
@@ -134,13 +129,13 @@ func TestVariables_CRUDWithCAS(t *testing.T) {
// Create sv1: should pass without issue
get, _, err := nsv.Create(sv1, nil)
require.NoError(t, err)
require.NotNil(t, get)
require.NotZero(t, get.CreateIndex)
require.NotZero(t, get.CreateTime)
require.NotZero(t, get.ModifyIndex)
require.NotZero(t, get.ModifyTime)
require.Equal(t, sv1.Items, get.Items)
must.NoError(t, err)
must.NotNil(t, get)
must.Positive(t, get.CreateIndex)
must.Positive(t, get.CreateTime)
must.Positive(t, get.ModifyIndex)
must.Positive(t, get.ModifyTime)
must.Eq(t, sv1.Items, get.Items)
// Update sv1 with CAS:
@@ -148,33 +143,32 @@ func TestVariables_CRUDWithCAS(t *testing.T) {
oobUpdate := sv1.Copy()
oobUpdate.Items["new-hotness"] = "yeah!"
nowVal, _, err := nsv.Update(oobUpdate, nil)
require.NoError(t, err)
must.NoError(t, err)
// - try to do an update with sv1's old state; should fail
_, _, err = nsv.CheckedUpdate(sv1, nil)
require.Error(t, err)
must.Error(t, err)
// - expect the error to be an ErrCASConflict, so we can cast
// to it and retrieve the Conflict value
var conflictErr ErrCASConflict
require.ErrorAs(t, err, &conflictErr)
require.Equal(t, nowVal, conflictErr.Conflict)
must.True(t, errors.As(err, &conflictErr))
must.Eq(t, nowVal, conflictErr.Conflict)
// Delete CAS: try to delete sv1 at old ModifyIndex; should
// return an ErrCASConflict. Check Conflict.
_, err = nsv.CheckedDelete(sv1.Path, sv1.ModifyIndex, nil)
require.Error(t, err)
require.ErrorAs(t, err, &conflictErr)
require.Equal(t, nowVal, conflictErr.Conflict)
must.True(t, errors.As(err, &conflictErr))
must.Eq(t, nowVal, conflictErr.Conflict)
// Delete CAS: delete at the current index; should succeed.
_, err = nsv.CheckedDelete(sv1.Path, nowVal.ModifyIndex, nil)
require.NoError(t, err)
must.NoError(t, err)
}
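The CAS assertions above show the shape of ErrCASConflict; the sketch below illustrates how a caller might rebase onto the conflicting value and retry a checked update once (the retry policy is illustrative, not from this change):
// Illustrative single-retry CheckedUpdate: on a CAS conflict, re-apply our
// items on top of the server's current value and try once more.
func checkedUpdateOnce(vars *Variables, v *Variable) (*Variable, error) {
	updated, _, err := vars.CheckedUpdate(v, nil)
	if err == nil {
		return updated, nil
	}
	var conflict ErrCASConflict
	if !errors.As(err, &conflict) {
		return nil, err
	}
	rebase := conflict.Conflict.Copy()
	rebase.Items = v.Items
	updated, _, err = vars.CheckedUpdate(rebase, nil)
	return updated, err
}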
func TestVariables_Read(t *testing.T) {
testutil.Parallel(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
@@ -215,16 +209,16 @@ func TestVariables_Read(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
get, _, err := nsv.Read(tc.path, nil)
if tc.expectedError != "" {
require.EqualError(t, err, tc.expectedError)
must.EqError(t, err, tc.expectedError)
} else {
require.NoError(t, err)
must.NoError(t, err)
}
if tc.checkValue {
if tc.expectedValue != nil {
require.NotNil(t, get)
require.Equal(t, tc.expectedValue, get)
must.NotNil(t, get)
must.Eq(t, tc.expectedValue, get)
} else {
require.Nil(t, get)
must.Nil(t, get)
}
}
})
@@ -233,8 +227,8 @@ func TestVariables_Read(t *testing.T) {
func writeTestVariable(t *testing.T, c *Client, sv *Variable) {
_, err := c.write("/v1/var/"+sv.Path, sv, sv, nil)
require.NoError(t, err, "Error writing test variable")
require.NoError(t, err, "Error writing test variable")
must.NoError(t, err, must.Sprint("failed writing test variable"))
must.NoError(t, err, must.Sprint("failed writing test variable"))
}
func TestVariable_CreateReturnsContent(t *testing.T) {
@@ -248,7 +242,7 @@ func TestVariable_CreateReturnsContent(t *testing.T) {
sv1.Items["k2"] = "v2"
sv1n, _, err := nsv.Create(sv1, nil)
require.NoError(t, err)
require.NotNil(t, sv1n)
require.Equal(t, sv1.Items, sv1n.Items)
must.NoError(t, err)
must.NotNil(t, sv1n)
must.Eq(t, sv1.Items, sv1n.Items)
}