mirror of
https://github.com/kemko/nomad.git
synced 2026-01-04 17:35:43 +03:00
E2E: refactor Connect tests to use stdlib testing (#20278)
Migrate our E2E tests for Connect off the old framework in preparation for writing E2E tests for transparent proxy and the updated workload identity workflow. Mark the tests that cover the legacy Consul token submitted workflow. Ref: https://github.com/hashicorp/nomad/pull/20175
This commit is contained in:
@@ -1,451 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package connect
|
||||
|
||||
import (
|
||||
"os"
|
||||
"regexp"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
consulapi "github.com/hashicorp/consul/api"
|
||||
uuidparse "github.com/hashicorp/go-uuid"
|
||||
nomadapi "github.com/hashicorp/nomad/api"
|
||||
"github.com/hashicorp/nomad/e2e/e2eutil"
|
||||
"github.com/hashicorp/nomad/e2e/framework"
|
||||
"github.com/hashicorp/nomad/helper/uuid"
|
||||
"github.com/hashicorp/nomad/jobspec"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// ConnectACLsE2ETest is the legacy-framework E2E suite exercising Consul
// Connect behavior when Consul ACLs are enabled.
type ConnectACLsE2ETest struct {
	framework.TC

	// used to store the root token so we can reset the client back to
	// it as needed
	consulManagementToken string

	// things to cleanup after each test case
	jobIDs          []string
	consulPolicyIDs []string
	consulTokenIDs  []string
	consulNamespace string
}
|
||||
|
||||
// BeforeAll runs once before any test case. It blocks until the Nomad
// cluster is usable, captures the Consul management token from the
// environment, and waits for Service Identity (SI) tokens left over from a
// previous run to disappear.
func (tc *ConnectACLsE2ETest) BeforeAll(f *framework.F) {
	// Wait for Nomad to be ready before doing anything.
	e2eutil.WaitForLeader(f.T(), tc.Nomad())
	e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 2)

	// Validate the consul root token exists, otherwise tests are just
	// going to be a train wreck.
	tc.consulManagementToken = os.Getenv(envConsulToken)

	// A parse failure implies the env var was unset or malformed.
	_, err := uuidparse.ParseUUID(tc.consulManagementToken)
	f.NoError(err, "CONSUL_HTTP_TOKEN not set")

	// ensure SI tokens from previous test cases were removed
	f.Eventually(func() bool {
		siTokens := tc.countSITokens(f.T())
		f.T().Log("cleanup: checking for remaining SI tokens:", siTokens)
		return len(siTokens) == 0
	}, 2*time.Minute, 2*time.Second, "SI tokens did not get removed")
}
|
||||
|
||||
// AfterEach does cleanup of Consul ACL objects that were created during each
// test case. Each test case may assume it is starting from a "fresh" state -
// as if the consul ACL bootstrap process had just taken place.
func (tc *ConnectACLsE2ETest) AfterEach(f *framework.F) {
	// Allow operators to inspect cluster state after a test run.
	if os.Getenv("NOMAD_TEST_SKIPCLEANUP") == "1" {
		return
	}

	t := f.T()

	// cleanup jobs
	for _, id := range tc.jobIDs {
		t.Log("cleanup: deregister nomad job id:", id)
		_, _, err := tc.Nomad().Jobs().Deregister(id, true, nil)
		f.NoError(err)
	}

	// cleanup consul tokens
	for _, id := range tc.consulTokenIDs {
		t.Log("cleanup: delete consul token id:", id)
		_, err := tc.Consul().ACL().TokenDelete(id, &consulapi.WriteOptions{
			Token:     tc.consulManagementToken,
			Namespace: tc.consulNamespace,
		})
		f.NoError(err)
	}

	// cleanup consul policies
	for _, id := range tc.consulPolicyIDs {
		t.Log("cleanup: delete consul policy id:", id)
		_, err := tc.Consul().ACL().PolicyDelete(id, &consulapi.WriteOptions{
			Token:     tc.consulManagementToken,
			Namespace: tc.consulNamespace,
		})
		f.NoError(err)
	}

	// Delete the Consul namespace if the test case created one.
	if tc.consulNamespace != "" {
		t.Log("cleanup: delete consul namespace:", tc.consulNamespace)
		_, err := tc.Consul().Namespaces().Delete(tc.consulNamespace, &consulapi.WriteOptions{
			Token: tc.consulManagementToken,
		})
		f.NoError(err)
	}

	// do garbage collection
	err := tc.Nomad().System().GarbageCollect()
	f.NoError(err)

	// assert there are no leftover SI tokens, which may take a minute to be
	// cleaned up
	f.Eventually(func() bool {
		siTokens := tc.countSITokens(t)
		t.Log("cleanup: checking for remaining SI tokens:", siTokens)
		return len(siTokens) == 0
	}, 2*time.Minute, 2*time.Second, "SI tokens did not get removed")

	// Reset per-test bookkeeping for the next test case.
	tc.jobIDs = []string{}
	tc.consulTokenIDs = []string{}
	tc.consulPolicyIDs = []string{}
	tc.consulNamespace = ""
}
|
||||
|
||||
// todo(shoenig): follow up refactor with e2eutil.ConsulPolicy
//
// consulPolicy describes a Consul ACL policy to be created for a test case.
type consulPolicy struct {
	Name      string // e.g. nomad-operator
	Rules     string // e.g. service "" { policy="write" }
	Namespace string // e.g. default
}
|
||||
|
||||
// todo(shoenig): follow up refactor with e2eutil.ConsulPolicy
//
// createConsulPolicy creates p in Consul using the management token, records
// the new policy ID for deletion in AfterEach, and returns that ID.
func (tc *ConnectACLsE2ETest) createConsulPolicy(p consulPolicy, f *framework.F) string {
	result, _, err := tc.Consul().ACL().PolicyCreate(&consulapi.ACLPolicy{
		Name:        p.Name,
		Description: "test policy " + p.Name,
		Rules:       p.Rules,
		Namespace:   p.Namespace,
	}, &consulapi.WriteOptions{Token: tc.consulManagementToken})
	f.NoError(err, "failed to create consul policy")
	tc.consulPolicyIDs = append(tc.consulPolicyIDs, result.ID)
	return result.ID
}
|
||||
|
||||
// createConsulNamespace creates the named Consul namespace using the
// management token and returns the name reported by Consul. The caller is
// responsible for recording it for cleanup (see AfterEach).
func (tc *ConnectACLsE2ETest) createConsulNamespace(namespace string, f *framework.F) string {
	result, _, err := tc.Consul().Namespaces().Create(&consulapi.Namespace{
		Name: namespace,
	}, &consulapi.WriteOptions{Token: tc.consulManagementToken})
	f.NoError(err, "failed to create consul namespace")
	return result.Name
}
|
||||
|
||||
// todo(shoenig): follow up refactor with e2eutil.ConsulPolicy
//
// createOperatorToken mints a Consul "operator" token attached to policyID in
// the default namespace and returns the token's secret ID.
func (tc *ConnectACLsE2ETest) createOperatorToken(policyID string, f *framework.F) string {
	return tc.createOperatorTokenNamespaced(policyID, "default", f)
}
|
||||
|
||||
// createOperatorTokenNamespaced mints a Consul ACL token attached to policyID
// in the given namespace, records its accessor ID for deletion in AfterEach,
// and returns the token's secret ID.
func (tc *ConnectACLsE2ETest) createOperatorTokenNamespaced(policyID string, namespace string, f *framework.F) string {
	token, _, err := tc.Consul().ACL().TokenCreate(&consulapi.ACLToken{
		Description: "operator token",
		Policies:    []*consulapi.ACLTokenPolicyLink{{ID: policyID}},
		Namespace:   namespace,
	}, &consulapi.WriteOptions{Token: tc.consulManagementToken})
	f.NoError(err, "failed to create operator token")
	tc.consulTokenIDs = append(tc.consulTokenIDs, token.AccessorID)
	return token.SecretID
}
|
||||
|
||||
// TestConnectACLsRegisterMasterToken checks that a Connect job can be planned
// while using the Consul master (management) token as the job's Consul token.
func (tc *ConnectACLsE2ETest) TestConnectACLsRegisterMasterToken(f *framework.F) {
	t := f.T()

	t.Log("test register Connect job w/ ACLs enabled w/ master token")

	jobID := "connect" + uuid.Generate()[0:8]
	tc.jobIDs = append(tc.jobIDs, jobID)

	jobAPI := tc.Nomad().Jobs()

	job, err := jobspec.ParseFile(demoConnectJob)
	f.NoError(err)

	// Set the job file to use the consul master token.
	// One should never do this in practice, but, it should work.
	// https://www.consul.io/docs/acl/acl-system.html#builtin-tokens
	job.ConsulToken = &tc.consulManagementToken
	job.ID = &jobID

	// Avoid using Register here, because that would actually create and run the
	// Job which runs the task, creates the SI token, which all needs to be
	// given time to settle and cleaned up. That is all covered in the big slow
	// test at the bottom.
	resp, _, err := jobAPI.Plan(job, false, nil)
	f.NoError(err)
	f.NotNil(resp)
}
|
||||
|
||||
// TestConnectACLsRegisterMissingOperatorToken checks that registering a
// Connect job is rejected when no Consul token is provided. Currently skipped
// (see the t.Skip message).
func (tc *ConnectACLsE2ETest) TestConnectACLsRegisterMissingOperatorToken(f *framework.F) {
	t := f.T()

	t.Skip("we don't have consul.allow_unauthenticated=false set because it would required updating every E2E test to pass a Consul token")

	t.Log("test register Connect job w/ ACLs enabled w/o operator token")

	jobID := "connect" + uuid.Short()
	tc.jobIDs = append(tc.jobIDs, jobID) // need to clean up if the test fails

	job, err := jobspec.ParseFile(demoConnectJob)
	f.NoError(err)
	jobAPI := tc.Nomad().Jobs()

	// Explicitly show the ConsulToken is not set
	job.ConsulToken = nil
	job.ID = &jobID

	// Registration must fail because no Consul token was supplied.
	_, _, err = jobAPI.Register(job, nil)
	f.Error(err)

	t.Log("job correctly rejected, with error:", err)
}
|
||||
|
||||
// TestConnectACLsRegisterFakeOperatorToken checks that registering a Connect
// job is rejected when the supplied Consul token is not a real token.
// Currently skipped (see the t.Skip message).
func (tc *ConnectACLsE2ETest) TestConnectACLsRegisterFakeOperatorToken(f *framework.F) {
	t := f.T()

	t.Skip("we don't have consul.allow_unauthenticated=false set because it would required updating every E2E test to pass a Consul token")

	t.Log("test register Connect job w/ ACLs enabled w/ operator token")

	policyID := tc.createConsulPolicy(consulPolicy{
		Name:  "nomad-operator-policy-" + uuid.Short(),
		Rules: `service "count-api" { policy = "write" } service "count-dashboard" { policy = "write" }`,
	}, f)
	t.Log("created operator policy:", policyID)

	// generate a fake consul token token
	fakeToken := uuid.Generate()

	jobID := "connect" + uuid.Short()
	tc.jobIDs = append(tc.jobIDs, jobID) // need to clean up if the test fails

	job := tc.parseJobSpecFile(t, demoConnectJob)

	jobAPI := tc.Nomad().Jobs()

	// deliberately set the fake Consul token
	job.ConsulToken = &fakeToken
	job.ID = &jobID

	// should fail, because the token is fake
	_, _, err := jobAPI.Register(job, nil)
	f.Error(err)
	t.Log("job correctly rejected, with error:", err)
}
|
||||
|
||||
// TestConnectACLsConnectDemo runs the Connect demo job with a real operator
// token, waits for both allocations to run, and verifies Consul minted one SI
// token per sidecar proxy.
func (tc *ConnectACLsE2ETest) TestConnectACLsConnectDemo(f *framework.F) {
	t := f.T()

	t.Log("test register Connect job w/ ACLs enabled w/ operator token")

	// === Setup ACL policy and mint Operator token ===

	// create a policy allowing writes of services "count-api" and "count-dashboard"
	policyID := tc.createConsulPolicy(consulPolicy{
		Name:  "nomad-operator-policy-" + uuid.Short(),
		Rules: `service "count-api" { policy = "write" } service "count-dashboard" { policy = "write" }`,
	}, f)
	t.Log("created operator policy:", policyID)

	// create a Consul "operator token" blessed with the above policy
	operatorToken := tc.createOperatorToken(policyID, f)
	t.Log("created operator token:", operatorToken)

	jobID := connectJobID()
	tc.jobIDs = append(tc.jobIDs, jobID)

	// Register with the operator token and wait for both allocs to run.
	allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), demoConnectJob, jobID, operatorToken)
	f.Equal(2, len(allocs), "expected 2 allocs for connect demo", allocs)
	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocs)
	f.Equal(2, len(allocIDs), "expected 2 allocIDs for connect demo", allocIDs)
	e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)

	// === Check Consul SI tokens were generated for sidecars ===
	foundSITokens := tc.countSITokens(t)
	f.Equal(2, len(foundSITokens), "expected 2 SI tokens total: %v", foundSITokens)
	f.Equal(1, foundSITokens["connect-proxy-count-api"], "expected 1 SI token for connect-proxy-count-api: %v", foundSITokens)
	f.Equal(1, foundSITokens["connect-proxy-count-dashboard"], "expected 1 SI token for connect-proxy-count-dashboard: %v", foundSITokens)

	t.Log("connect legacy job with ACLs enable finished")
}
|
||||
|
||||
// TestConnectACLsConnectDemoNamespaced is the namespaced variant of
// TestConnectACLsConnectDemo: the policy and operator token live in a newly
// created Consul namespace.
func (tc *ConnectACLsE2ETest) TestConnectACLsConnectDemoNamespaced(f *framework.F) {
	t := f.T()

	t.Log("test register Connect job w/ ACLs enabled w/ operator token")

	// === Setup ACL policy within a namespace and mint Operator token ===

	// create a namespace
	namespace := tc.createConsulNamespace("ns-"+uuid.Short(), f)
	tc.consulNamespace = namespace // recorded so AfterEach deletes it
	t.Log("created namespace:", namespace)

	// create a policy allowing writes of services "count-api" and "count-dashboard"
	policyID := tc.createConsulPolicy(consulPolicy{
		Name:      "nomad-operator-policy-" + uuid.Short(),
		Rules:     `service "count-api" { policy = "write" } service "count-dashboard" { policy = "write" }`,
		Namespace: namespace,
	}, f)
	t.Log("created operator policy:", policyID)

	// create a Consul "operator token" blessed with the above policy
	operatorToken := tc.createOperatorTokenNamespaced(policyID, namespace, f)
	t.Log("created operator token:", operatorToken)

	jobID := connectJobID()
	tc.jobIDs = append(tc.jobIDs, jobID)

	// Register with the operator token and wait for both allocs to run.
	allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), demoConnectJob, jobID, operatorToken)
	f.Equal(2, len(allocs), "expected 2 allocs for connect demo", allocs)
	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocs)
	f.Equal(2, len(allocIDs), "expected 2 allocIDs for connect demo", allocIDs)
	e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)

	// === Check Consul SI tokens were generated for sidecars ===
	foundSITokens := tc.countSITokens(t)
	f.Equal(2, len(foundSITokens), "expected 2 SI tokens total: %v", foundSITokens)
	f.Equal(1, foundSITokens["connect-proxy-count-api"], "expected 1 SI token for connect-proxy-count-api: %v", foundSITokens)
	f.Equal(1, foundSITokens["connect-proxy-count-dashboard"], "expected 1 SI token for connect-proxy-count-dashboard: %v", foundSITokens)

	t.Log("connect legacy job with ACLs enable finished")
}
|
||||
|
||||
// TestConnectACLsConnectNativeDemo runs the Connect Native demo job with an
// operator token and verifies SI tokens were minted for the native tasks.
func (tc *ConnectACLsE2ETest) TestConnectACLsConnectNativeDemo(f *framework.F) {
	t := f.T()

	t.Log("test register Connect job w/ ACLs enabled w/ operator token")

	// === Setup ACL policy and mint Operator token ===

	// create a policy allowing writes of services "uuid-fe" and "uuid-api"
	policyID := tc.createConsulPolicy(consulPolicy{
		Name:  "nomad-operator-policy-" + uuid.Short(),
		Rules: `service "uuid-fe" { policy = "write" } service "uuid-api" { policy = "write" }`,
	}, f)
	t.Log("created operator policy:", policyID)

	// create a Consul "operator token" blessed with the above policy
	operatorToken := tc.createOperatorToken(policyID, f)
	t.Log("created operator token:", operatorToken)

	jobID := connectJobID()
	tc.jobIDs = append(tc.jobIDs, jobID)

	allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), demoConnectNativeJob, jobID, operatorToken)
	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocs)
	e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)

	// === Check Consul SI tokens were generated for native tasks ===
	foundSITokens := tc.countSITokens(t)
	f.Equal(2, len(foundSITokens), "expected 2 SI tokens total: %v", foundSITokens)
	f.Equal(1, foundSITokens["frontend"], "expected 1 SI token for frontend: %v", foundSITokens)
	f.Equal(1, foundSITokens["generate"], "expected 1 SI token for generate: %v", foundSITokens)

	t.Log("connect native job with ACLs enabled finished")
}
|
||||
|
||||
// TestConnectACLsConnectIngressGatewayDemo runs the ingress gateway demo job
// with an operator token and verifies SI tokens were minted for the gateway
// and the backend task.
func (tc *ConnectACLsE2ETest) TestConnectACLsConnectIngressGatewayDemo(f *framework.F) {
	t := f.T()

	t.Log("test register Connect Ingress Gateway job w/ ACLs enabled")

	// setup ACL policy and mint operator token

	policyID := tc.createConsulPolicy(consulPolicy{
		Name:  "nomad-operator-policy-" + uuid.Short(),
		Rules: `service "my-ingress-service" { policy = "write" } service "uuid-api" { policy = "write" }`,
	}, f)
	operatorToken := tc.createOperatorToken(policyID, f)
	t.Log("created operator token:", operatorToken)

	jobID := connectJobID()
	tc.jobIDs = append(tc.jobIDs, jobID)

	allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), demoConnectIngressGateway, jobID, operatorToken)
	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocs)
	e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)

	// One SI token for the ingress gateway, one for the backend task.
	foundSITokens := tc.countSITokens(t)
	f.Equal(2, len(foundSITokens), "expected 2 SI tokens total: %v", foundSITokens)
	f.Equal(1, foundSITokens["connect-ingress-my-ingress-service"], "expected 1 SI token for connect-ingress-my-ingress-service: %v", foundSITokens)
	f.Equal(1, foundSITokens["generate"], "expected 1 SI token for generate: %v", foundSITokens)

	t.Log("connect ingress gateway job with ACLs enabled finished")
}
|
||||
|
||||
// TestConnectACLsConnectTerminatingGatewayDemo runs the terminating gateway
// demo job with an operator token and verifies SI tokens were minted for the
// gateway and the dashboard sidecar.
func (tc *ConnectACLsE2ETest) TestConnectACLsConnectTerminatingGatewayDemo(f *framework.F) {
	t := f.T()

	t.Log("test register Connect Terminating Gateway job w/ ACLs enabled")

	// setup ACL policy and mint operator token

	policyID := tc.createConsulPolicy(consulPolicy{
		Name:  "nomad-operator-policy-" + uuid.Short(),
		Rules: `service "api-gateway" { policy = "write" } service "count-dashboard" { policy = "write" }`,
	}, f)
	operatorToken := tc.createOperatorToken(policyID, f)
	t.Log("created operator token:", operatorToken)

	jobID := connectJobID()
	tc.jobIDs = append(tc.jobIDs, jobID)

	allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), demoConnectTerminatingGateway, jobID, operatorToken)
	allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocs)
	e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)

	// One SI token for the terminating gateway, one for the dashboard proxy.
	foundSITokens := tc.countSITokens(t)
	f.Equal(2, len(foundSITokens), "expected 2 SI tokens total: %v", foundSITokens)
	f.Equal(1, foundSITokens["connect-terminating-api-gateway"], "expected 1 SI token for connect-terminating-api-gateway: %v", foundSITokens)
	f.Equal(1, foundSITokens["connect-proxy-count-dashboard"], "expected 1 SI token for count-dashboard: %v", foundSITokens)

	t.Log("connect terminating gateway job with ACLs enabled finished")
}
|
||||
|
||||
var (
	// siTokenRe matches the description Nomad writes on Service Identity
	// tokens: two bracketed 36-character IDs followed by the bracketed
	// service name, which is captured as the sole submatch.
	siTokenRe = regexp.MustCompile(`_nomad_si \[[\w-]{36}] \[[\w-]{36}] \[([\S]+)]`)
)
|
||||
|
||||
func (tc *ConnectACLsE2ETest) serviceofSIToken(description string) string {
|
||||
if m := siTokenRe.FindStringSubmatch(description); len(m) == 2 {
|
||||
return m[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// countSITokens lists every ACL token visible to the management token and
// returns a count of Service Identity tokens per service name, as parsed
// from each token's description.
func (tc *ConnectACLsE2ETest) countSITokens(t *testing.T) map[string]int {
	aclAPI := tc.Consul().ACL()
	tokens, _, err := aclAPI.TokenList(&consulapi.QueryOptions{
		Token: tc.consulManagementToken,
	})
	require.NoError(t, err)

	// count the number of SI tokens matching each service name
	foundSITokens := make(map[string]int)
	for _, token := range tokens {
		if service := tc.serviceofSIToken(token.Description); service != "" {
			foundSITokens[service]++
		}
	}

	return foundSITokens
}
|
||||
|
||||
// parseJobSpecFile parses the HCL jobspec at filename, failing the test on
// any parse error.
func (tc *ConnectACLsE2ETest) parseJobSpecFile(t *testing.T, filename string) *nomadapi.Job {
	job, err := jobspec.ParseFile(filename)
	require.NoError(t, err)
	return job
}
|
||||
@@ -4,19 +4,241 @@
|
||||
package connect
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
capi "github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/nomad/e2e/e2eutil"
|
||||
"github.com/hashicorp/nomad/e2e/v3/jobs3"
|
||||
"github.com/hashicorp/nomad/helper/uuid"
|
||||
"github.com/shoenig/test"
|
||||
"github.com/shoenig/test/must"
|
||||
)
|
||||
|
||||
// TestConnect_LegacyACLs tests the workflows where the operator provides their
// own token, rather than using Nomad's token or Workload Identity
func TestConnect_LegacyACLs(t *testing.T) {

	nomadClient := e2eutil.NomadClient(t)
	e2eutil.WaitForLeader(t, nomadClient)
	e2eutil.WaitForNodesReady(t, nomadClient, 2)

	// Garbage-collect the cluster once all subtests have finished.
	t.Cleanup(func() {
		_, err := e2eutil.Command("nomad", "system", "gc")
		test.NoError(t, err)
	})

	t.Run("ConnectDemo", testConnectDemoLegacyACLs)
	t.Run("ConnectDemoNamespaced", testConnectDemoLegacyACLsNamespaced)
	t.Run("ConnectNativeDemo", testConnectNativeDemoLegacyACLs)
	t.Run("ConnectIngressGatewayDemo", testConnectIngressGatewayDemoLegacyACLs)
	t.Run("ConnectTerminatingGateway", testConnectTerminatingGatewayLegacyACLs)
}
|
||||
|
||||
// createPolicy writes a Consul ACL policy with the given rules into namespace
// ns. It returns the new policy's ID and a function that deletes the policy.
func createPolicy(t *testing.T, cc *capi.Client, ns, rules string) (string, func()) {
	spec := &capi.ACLPolicy{
		Name:      "nomad-operator-policy-" + uuid.Short(),
		Rules:     rules,
		Namespace: ns,
	}
	policy, _, err := cc.ACL().PolicyCreate(spec, nil)
	must.NoError(t, err)

	cleanup := func() { cc.ACL().PolicyDelete(policy.ID, nil) }
	return policy.ID, cleanup
}
|
||||
|
||||
// createToken mints a Consul ACL token attached to policyID in namespace ns.
// It returns the token's secret ID and a function that deletes the token by
// its accessor ID.
func createToken(t *testing.T, cc *capi.Client, policyID, ns string) (string, func()) {
	spec := &capi.ACLToken{
		Description: "test token",
		Policies:    []*capi.ACLTokenPolicyLink{{ID: policyID}},
		Namespace:   ns,
	}
	token, _, err := cc.ACL().TokenCreate(spec, nil)
	must.NoError(t, err)

	cleanup := func() { cc.ACL().TokenDelete(token.AccessorID, nil) }
	return token.SecretID, cleanup
}
|
||||
|
||||
// testConnectDemoLegacyACLs tests the demo job file used in Connect Integration examples.
func testConnectDemoLegacyACLs(t *testing.T) {
	cc := e2eutil.ConsulClient(t)

	// Policy granting write on the two demo services.
	policyID, policyCleanup := createPolicy(t, cc, "default",
		`service "count-api" { policy = "write" } service "count-dashboard" { policy = "write" }`)
	t.Cleanup(policyCleanup)

	token, tokenCleanup := createToken(t, cc, policyID, "default")
	t.Cleanup(tokenCleanup)

	// Submit the demo job with the operator-provided ("legacy") Consul token.
	_, cleanup := jobs3.Submit(t, "./input/demo.nomad",
		jobs3.Timeout(time.Second*60), jobs3.LegacyConsulToken(token))
	t.Cleanup(cleanup)

	// Allow dashboard -> api traffic through the mesh.
	ixn := &capi.Intention{
		SourceName:      "count-dashboard",
		DestinationName: "count-api",
		Action:          "allow",
	}
	_, err := cc.Connect().IntentionUpsert(ixn, nil)
	must.NoError(t, err, must.Sprint("could not create intention"))

	t.Cleanup(func() {
		_, err := cc.Connect().IntentionDeleteExact("count-dashboard", "count-api", nil)
		test.NoError(t, err)
	})

	// Both sidecar proxies healthy, one SI token minted per sidecar.
	assertServiceOk(t, cc, "count-api-sidecar-proxy")
	assertServiceOk(t, cc, "count-dashboard-sidecar-proxy")
	assertSITokens(t, cc, map[string]int{
		"connect-proxy-count-api": 1, "connect-proxy-count-dashboard": 1})
}
|
||||
|
||||
// testConnectDemoLegacyACLsNamespaced tests the demo job file used in Connect
// Integration examples.
func testConnectDemoLegacyACLsNamespaced(t *testing.T) {
	cc := e2eutil.ConsulClient(t)

	// Create a throwaway Consul namespace for this test case.
	ns := "ns-" + uuid.Short()
	_, _, err := cc.Namespaces().Create(&capi.Namespace{Name: ns}, nil)
	must.NoError(t, err)
	t.Cleanup(func() { cc.Namespaces().Delete(ns, nil) })

	// Policy granting write on the two demo services, inside the namespace.
	policyID, policyCleanup := createPolicy(t, cc, ns,
		`service "count-api" { policy = "write" } service "count-dashboard" { policy = "write" }`)
	t.Cleanup(policyCleanup)

	token, tokenCleanup := createToken(t, cc, policyID, ns)
	t.Cleanup(tokenCleanup)

	// Submit the demo job with the operator-provided ("legacy") Consul token.
	_, cleanup := jobs3.Submit(t, "./input/demo.nomad",
		jobs3.Timeout(time.Second*60), jobs3.LegacyConsulToken(token))
	t.Cleanup(cleanup)

	// Allow dashboard -> api traffic through the mesh.
	ixn := &capi.Intention{
		SourceName:      "count-dashboard",
		DestinationName: "count-api",
		Action:          "allow",
	}
	_, err = cc.Connect().IntentionUpsert(ixn, nil)
	must.NoError(t, err, must.Sprint("could not create intention"))

	t.Cleanup(func() {
		_, err := cc.Connect().IntentionDeleteExact("count-dashboard", "count-api", nil)
		test.NoError(t, err)
	})

	// Both sidecar proxies healthy, one SI token minted per sidecar.
	assertServiceOk(t, cc, "count-api-sidecar-proxy")
	assertServiceOk(t, cc, "count-dashboard-sidecar-proxy")
	assertSITokens(t, cc, map[string]int{
		"connect-proxy-count-api": 1, "connect-proxy-count-dashboard": 1})
}
|
||||
|
||||
// testConnectNativeDemoLegacyACLs tests the demo job file used in Connect Native
// Integration examples.
func testConnectNativeDemoLegacyACLs(t *testing.T) {
	cc := e2eutil.ConsulClient(t)

	// Policy granting write on the two native services.
	policyID, policyCleanup := createPolicy(t, cc, "default",
		`service "uuid-fe" { policy = "write" } service "uuid-api" { policy = "write" }`)
	t.Cleanup(policyCleanup)

	token, tokenCleanup := createToken(t, cc, policyID, "default")
	t.Cleanup(tokenCleanup)

	// Submit the native demo job with the operator-provided Consul token.
	_, cleanup := jobs3.Submit(t, "./input/native-demo.nomad",
		jobs3.Timeout(time.Second*60), jobs3.LegacyConsulToken(token))
	t.Cleanup(cleanup)

	// One SI token per connect-native task.
	assertSITokens(t, cc, map[string]int{"frontend": 1, "generate": 1})
}
|
||||
|
||||
// testConnectIngressGatewayDemoLegacyACLs tests a job with an ingress gateway
func testConnectIngressGatewayDemoLegacyACLs(t *testing.T) {
	cc := e2eutil.ConsulClient(t)

	// Policy granting write on the gateway service and the backend service.
	policyID, policyCleanup := createPolicy(t, cc, "default",
		`service "my-ingress-service" { policy = "write" } service "uuid-api" { policy = "write" }`)
	t.Cleanup(policyCleanup)

	token, tokenCleanup := createToken(t, cc, policyID, "default")
	t.Cleanup(tokenCleanup)

	// Submit the ingress gateway job with the operator-provided Consul token.
	_, cleanup := jobs3.Submit(t, "./input/ingress-gateway.nomad",
		jobs3.Timeout(time.Second*60), jobs3.LegacyConsulToken(token))
	t.Cleanup(cleanup)

	// One SI token for the ingress gateway, one for the backend task.
	assertSITokens(t, cc, map[string]int{"connect-ingress-my-ingress-service": 1, "generate": 1})
}
|
||||
|
||||
// testConnectTerminatingGatewayLegacyACLs tests a job with a terminating gateway
func testConnectTerminatingGatewayLegacyACLs(t *testing.T) {
	cc := e2eutil.ConsulClient(t)

	// Policy granting write on the gateway service and the dashboard service.
	policyID, policyCleanup := createPolicy(t, cc, "default",
		`service "api-gateway" { policy = "write" } service "count-dashboard" { policy = "write" }`)
	t.Cleanup(policyCleanup)

	token, tokenCleanup := createToken(t, cc, policyID, "default")
	t.Cleanup(tokenCleanup)

	// Submit the terminating gateway job with the operator-provided token.
	_, cleanup := jobs3.Submit(t, "./input/terminating-gateway.nomad",
		jobs3.Timeout(time.Second*60), jobs3.LegacyConsulToken(token))
	t.Cleanup(cleanup)

	// Allow dashboard -> api traffic through the mesh.
	ixn := &capi.Intention{
		SourceName:      "count-dashboard",
		DestinationName: "count-api",
		Action:          "allow",
	}
	_, err := cc.Connect().IntentionUpsert(ixn, nil)
	must.NoError(t, err, must.Sprint("could not create intention"))

	t.Cleanup(func() {
		_, err := cc.Connect().IntentionDeleteExact("count-dashboard", "count-api", nil)
		test.NoError(t, err)
	})

	assertServiceOk(t, cc, "api-gateway")
	assertServiceOk(t, cc, "count-dashboard-sidecar-proxy")
	assertServiceOk(t, cc, "count-api")

	// One SI token for the terminating gateway, one for the dashboard proxy.
	assertSITokens(t, cc, map[string]int{
		"connect-terminating-api-gateway": 1, "connect-proxy-count-dashboard": 1})
}
|
||||
|
||||
// assertSITokens checks that, for each service name in expect, exactly that
// many Service Identity tokens currently exist in Consul.
func assertSITokens(t *testing.T, cc *capi.Client, expect map[string]int) {
	tokens, _, err := cc.ACL().TokenList(nil)
	must.NoError(t, err)

	// count the number of SI tokens matching each service name
	counts := make(map[string]int)
	for _, tok := range tokens {
		service := serviceofSIToken(tok.Description)
		if service == "" {
			continue
		}
		counts[service]++
	}

	for service, want := range expect {
		test.Eq(t, want, counts[service], test.Sprintf("expected tokens for %q", service))
	}
}
|
||||
|
||||
// Test_serviceOfSIToken exercises the SI token description parser with empty,
// non-matching, and well-formed inputs.
//
// Fix: the previous text declared `result` twice inside try (once via the
// removed ConnectACLsE2ETest method + testify, once via the package-level
// serviceofSIToken + must), which is a `:=` redeclaration compile error and
// referenced the deleted type. Only the stdlib-testing version is kept.
func Test_serviceOfSIToken(t *testing.T) {
	try := func(description, exp string) {
		result := serviceofSIToken(description)
		must.Eq(t, exp, result)
	}

	try("", "")
	try("foobarbaz", "")
	try("_nomad_si [8b1a5d3f-7e61-4a5a-8a57-7e7ad91e63b6] [8b1a5d3f-7e61-4a5a-8a57-7e7ad91e63b6] [foo-service]", "foo-service")
}
|
||||
|
||||
var (
|
||||
siTokenRe = regexp.MustCompile(`_nomad_si \[[\w-]{36}] \[[\w-]{36}] \[([\S]+)]`)
|
||||
)
|
||||
|
||||
func serviceofSIToken(description string) string {
|
||||
if m := siTokenRe.FindStringSubmatch(description); len(m) == 2 {
|
||||
return m[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -1,67 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package connect
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
capi "github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/nomad/e2e/e2eutil"
|
||||
"github.com/hashicorp/nomad/e2e/framework"
|
||||
"github.com/hashicorp/nomad/helper/uuid"
|
||||
)
|
||||
|
||||
const (
	// consulNamespace is the Consul namespace used when checking service
	// health status in this test.
	consulNamespace = "default"
)
|
||||
|
||||
// ConnectClientStateE2ETest checks that Connect workloads recover after a
// Nomad client agent restart.
type ConnectClientStateE2ETest struct {
	framework.TC
	// jobIDs tracks jobs to deregister in AfterEach
	jobIDs []string
}
|
||||
|
||||
// BeforeAll waits for a Nomad leader and at least one ready client node
// before any test case runs.
func (tc *ConnectClientStateE2ETest) BeforeAll(f *framework.F) {
	e2eutil.WaitForLeader(f.T(), tc.Nomad())
	e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 1)
}
|
||||
|
||||
// AfterEach deregisters the jobs created by the test case and garbage
// collects the cluster, unless cleanup is explicitly skipped.
func (tc *ConnectClientStateE2ETest) AfterEach(f *framework.F) {
	if os.Getenv("NOMAD_TEST_SKIPCLEANUP") == "1" {
		return
	}

	// best-effort cleanup: errors are deliberately ignored here
	for _, id := range tc.jobIDs {
		tc.Nomad().Jobs().Deregister(id, true, nil)
	}
	tc.jobIDs = []string{}
	tc.Nomad().System().GarbageCollect()
}
|
||||
|
||||
// TestClientRestart runs the Connect demo job, restarts the Nomad agent on
// the node hosting the first allocation, and verifies the sidecar service is
// healthy in Consul both before and after the restart. Currently skipped.
func (tc *ConnectClientStateE2ETest) TestClientRestart(f *framework.F) {
	t := f.T()

	t.Skip("skipping test that does nomad agent restart")

	jobID := "connect" + uuid.Generate()[0:8]
	tc.jobIDs = append(tc.jobIDs, jobID)
	client := tc.Nomad()
	consulClient := tc.Consul()

	allocs := e2eutil.RegisterAndWaitForAllocs(t, client,
		"connect/input/demo.nomad", jobID, "")
	f.Equal(2, len(allocs))

	// Sidecar must be healthy before the restart.
	e2eutil.RequireConsulStatus(f.Assertions, consulClient, consulNamespace, "count-api-sidecar-proxy", capi.HealthPassing)
	nodeID := allocs[0].NodeID

	// A non-empty restartID is tracked for cleanup even when restart errors.
	restartID, err := e2eutil.AgentRestart(client, nodeID)
	if restartID != "" {
		tc.jobIDs = append(tc.jobIDs, restartID)
	}
	if err != nil {
		t.Skip("node cannot be restarted", err)
	}

	// Sidecar must return to healthy after the restart.
	e2eutil.RequireConsulStatus(f.Assertions, consulClient, consulNamespace, "count-api-sidecar-proxy", capi.HealthPassing)
}
|
||||
50
e2e/connect/client_test.go
Normal file
50
e2e/connect/client_test.go
Normal file
@@ -0,0 +1,50 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package connect
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
capi "github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/nomad/e2e/e2eutil"
|
||||
"github.com/hashicorp/nomad/e2e/v3/jobs3"
|
||||
"github.com/shoenig/test"
|
||||
"github.com/shoenig/test/must"
|
||||
)
|
||||
|
||||
func TestConnect_ClientRestart(t *testing.T) {
|
||||
t.Skip("skipping test that does nomad agent restart")
|
||||
|
||||
nomadClient := e2eutil.NomadClient(t)
|
||||
e2eutil.WaitForLeader(t, nomadClient)
|
||||
e2eutil.WaitForNodesReady(t, nomadClient, 2)
|
||||
|
||||
sub, cleanup := jobs3.Submit(t, "./input/demo.nomad")
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
cc := e2eutil.ConsulClient(t)
|
||||
|
||||
ixn := &capi.Intention{
|
||||
SourceName: "count-dashboard",
|
||||
DestinationName: "count-api",
|
||||
Action: "allow",
|
||||
}
|
||||
_, err := cc.Connect().IntentionUpsert(ixn, nil)
|
||||
must.NoError(t, err, must.Sprint("could not create intention"))
|
||||
|
||||
t.Cleanup(func() {
|
||||
_, err := cc.Connect().IntentionDeleteExact("count-dashboard", "count-api", nil)
|
||||
test.NoError(t, err)
|
||||
})
|
||||
|
||||
assertServiceOk(t, cc, "count-api-sidecar-proxy")
|
||||
assertServiceOk(t, cc, "count-dashboard-sidecar-proxy")
|
||||
|
||||
nodeID := sub.Allocs()[0].NodeID
|
||||
_, err = e2eutil.AgentRestart(nomadClient, nodeID)
|
||||
must.Error(t, err, must.Sprint("node cannot be restarted"))
|
||||
|
||||
assertServiceOk(t, cc, "count-api-sidecar-proxy")
|
||||
assertServiceOk(t, cc, "count-dashboard-sidecar-proxy")
|
||||
}
|
||||
@@ -1,158 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package connect
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/hashicorp/nomad/e2e/e2eutil"
|
||||
"github.com/hashicorp/nomad/e2e/framework"
|
||||
"github.com/hashicorp/nomad/helper/uuid"
|
||||
)
|
||||
|
||||
const (
|
||||
// envConsulToken is the consul http token environment variable
|
||||
envConsulToken = "CONSUL_HTTP_TOKEN"
|
||||
|
||||
// demoConnectJob is the example connect enabled job useful for testing
|
||||
demoConnectJob = "connect/input/demo.nomad"
|
||||
|
||||
// demoConnectCustomProxyExposed is a connect job with custom sidecar_task
|
||||
// that also uses the expose check feature.
|
||||
demoConnectCustomProxyExposed = "connect/input/expose-custom.nomad"
|
||||
|
||||
// demoConnectNativeJob is the example connect native enabled job useful for testing
|
||||
demoConnectNativeJob = "connect/input/native-demo.nomad"
|
||||
|
||||
// demoConnectIngressGateway is the example ingress gateway job useful for testing
|
||||
demoConnectIngressGateway = "connect/input/ingress-gateway.nomad"
|
||||
|
||||
// demoConnectMultiIngressGateway is the example multi ingress gateway job useful for testing
|
||||
demoConnectMultiIngressGateway = "connect/input/multi-ingress.nomad"
|
||||
|
||||
// demoConnectTerminatingGateway is the example terminating gateway job useful for testing
|
||||
demoConnectTerminatingGateway = "connect/input/terminating-gateway.nomad"
|
||||
)
|
||||
|
||||
type ConnectE2ETest struct {
|
||||
framework.TC
|
||||
jobIds []string
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Connect tests without Consul ACLs enabled.
|
||||
framework.AddSuites(&framework.TestSuite{
|
||||
Component: "Connect",
|
||||
CanRunLocal: true,
|
||||
Consul: true,
|
||||
Cases: []framework.TestCase{
|
||||
new(ConnectE2ETest),
|
||||
new(ConnectClientStateE2ETest),
|
||||
},
|
||||
})
|
||||
|
||||
framework.AddSuites(&framework.TestSuite{
|
||||
Component: "ConnectACLs",
|
||||
CanRunLocal: false,
|
||||
Consul: true,
|
||||
Parallel: false,
|
||||
Cases: []framework.TestCase{
|
||||
new(ConnectACLsE2ETest),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (tc *ConnectE2ETest) BeforeAll(f *framework.F) {
|
||||
e2eutil.WaitForLeader(f.T(), tc.Nomad())
|
||||
e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 2)
|
||||
}
|
||||
|
||||
func (tc *ConnectE2ETest) AfterEach(f *framework.F) {
|
||||
if os.Getenv("NOMAD_TEST_SKIPCLEANUP") == "1" {
|
||||
return
|
||||
}
|
||||
|
||||
for _, id := range tc.jobIds {
|
||||
tc.Nomad().Jobs().Deregister(id, true, nil)
|
||||
}
|
||||
tc.jobIds = []string{}
|
||||
tc.Nomad().System().GarbageCollect()
|
||||
}
|
||||
|
||||
func connectJobID() string {
|
||||
return "connect" + uuid.Generate()[0:8]
|
||||
}
|
||||
|
||||
// TestConnectDemo tests the demo job file used in Connect Integration examples.
|
||||
func (tc *ConnectE2ETest) TestConnectDemo(f *framework.F) {
|
||||
t := f.T()
|
||||
|
||||
jobID := connectJobID()
|
||||
tc.jobIds = append(tc.jobIds, jobID)
|
||||
|
||||
allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), demoConnectJob, jobID, "")
|
||||
allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocs)
|
||||
e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)
|
||||
}
|
||||
|
||||
// TestConnectCustomSidecarExposed tests that a connect sidecar with custom task
|
||||
// definition can also make use of the expose service check feature.
|
||||
func (tc *ConnectE2ETest) TestConnectCustomSidecarExposed(f *framework.F) {
|
||||
t := f.T()
|
||||
|
||||
jobID := connectJobID()
|
||||
tc.jobIds = append(tc.jobIds, jobID)
|
||||
|
||||
allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), demoConnectCustomProxyExposed, jobID, "")
|
||||
allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocs)
|
||||
e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)
|
||||
}
|
||||
|
||||
// TestConnectNativeDemo tests the demo job file used in Connect Native Integration examples.
|
||||
func (tc *ConnectE2ETest) TestConnectNativeDemo(f *framework.F) {
|
||||
t := f.T()
|
||||
|
||||
jobID := connectJobID()
|
||||
tc.jobIds = append(tc.jobIds, jobID)
|
||||
|
||||
allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), demoConnectNativeJob, jobID, "")
|
||||
allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocs)
|
||||
e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)
|
||||
}
|
||||
|
||||
func (tc *ConnectE2ETest) TestConnectIngressGatewayDemo(f *framework.F) {
|
||||
t := f.T()
|
||||
|
||||
jobID := connectJobID()
|
||||
tc.jobIds = append(tc.jobIds, jobID)
|
||||
|
||||
allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), demoConnectIngressGateway, jobID, "")
|
||||
allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocs)
|
||||
e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)
|
||||
}
|
||||
|
||||
func (tc *ConnectE2ETest) TestConnectMultiIngressGatewayDemo(f *framework.F) {
|
||||
t := f.T()
|
||||
|
||||
jobID := connectJobID()
|
||||
tc.jobIds = append(tc.jobIds, jobID)
|
||||
|
||||
allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), demoConnectMultiIngressGateway, jobID, "")
|
||||
|
||||
allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocs)
|
||||
e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)
|
||||
}
|
||||
|
||||
func (tc *ConnectE2ETest) TestConnectTerminatingGatewayDemo(f *framework.F) {
|
||||
|
||||
t := f.T()
|
||||
|
||||
jobID := connectJobID()
|
||||
tc.jobIds = append(tc.jobIds, jobID)
|
||||
|
||||
allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), demoConnectTerminatingGateway, jobID, "")
|
||||
|
||||
allocIDs := e2eutil.AllocIDsFromAllocationListStubs(allocs)
|
||||
e2eutil.WaitForAllocsRunning(t, tc.Nomad(), allocIDs)
|
||||
}
|
||||
132
e2e/connect/connect_test.go
Normal file
132
e2e/connect/connect_test.go
Normal file
@@ -0,0 +1,132 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package connect
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
capi "github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/nomad/e2e/e2eutil"
|
||||
"github.com/hashicorp/nomad/e2e/v3/jobs3"
|
||||
"github.com/shoenig/test"
|
||||
"github.com/shoenig/test/must"
|
||||
)
|
||||
|
||||
func TestConnect(t *testing.T) {
|
||||
|
||||
nomadClient := e2eutil.NomadClient(t)
|
||||
e2eutil.WaitForLeader(t, nomadClient)
|
||||
e2eutil.WaitForNodesReady(t, nomadClient, 2)
|
||||
|
||||
t.Cleanup(func() {
|
||||
_, err := e2eutil.Command("nomad", "system", "gc")
|
||||
test.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("ConnectDemo", testConnectDemo)
|
||||
t.Run("ConnectCustomSidecarExposed", testConnectCustomSidecarExposed)
|
||||
t.Run("ConnectNativeDemo", testConnectNativeDemo)
|
||||
t.Run("ConnectIngressGatewayDemo", testConnectIngressGatewayDemo)
|
||||
t.Run("ConnectMultiIngress", testConnectMultiIngressGateway)
|
||||
t.Run("ConnectTerminatingGateway", testConnectTerminatingGateway)
|
||||
t.Run("ConnectMultiService", testConnectMultiService)
|
||||
}
|
||||
|
||||
// testConnectDemo tests the demo job file used in Connect Integration examples.
|
||||
func testConnectDemo(t *testing.T) {
|
||||
_, cleanup := jobs3.Submit(t, "./input/demo.nomad", jobs3.Timeout(time.Second*60))
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
cc := e2eutil.ConsulClient(t)
|
||||
|
||||
ixn := &capi.Intention{
|
||||
SourceName: "count-dashboard",
|
||||
DestinationName: "count-api",
|
||||
Action: "allow",
|
||||
}
|
||||
_, err := cc.Connect().IntentionUpsert(ixn, nil)
|
||||
must.NoError(t, err, must.Sprint("could not create intention"))
|
||||
|
||||
t.Cleanup(func() {
|
||||
_, err := cc.Connect().IntentionDeleteExact("count-dashboard", "count-api", nil)
|
||||
test.NoError(t, err)
|
||||
})
|
||||
|
||||
assertServiceOk(t, cc, "count-api-sidecar-proxy")
|
||||
assertServiceOk(t, cc, "count-dashboard-sidecar-proxy")
|
||||
}
|
||||
|
||||
// testConnectCustomSidecarExposed tests that a connect sidecar with custom task
|
||||
// definition can also make use of the expose service check feature.
|
||||
func testConnectCustomSidecarExposed(t *testing.T) {
|
||||
_, cleanup := jobs3.Submit(t, "./input/expose-custom.nomad", jobs3.Timeout(time.Second*60))
|
||||
t.Cleanup(cleanup)
|
||||
}
|
||||
|
||||
// testConnectNativeDemo tests the demo job file used in Connect Native
|
||||
// Integration examples.
|
||||
func testConnectNativeDemo(t *testing.T) {
|
||||
_, cleanup := jobs3.Submit(t, "./input/native-demo.nomad", jobs3.Timeout(time.Second*60))
|
||||
t.Cleanup(cleanup)
|
||||
}
|
||||
|
||||
// testConnectIngressGatewayDemo tests a job with an ingress gateway
|
||||
func testConnectIngressGatewayDemo(t *testing.T) {
|
||||
_, cleanup := jobs3.Submit(t, "./input/ingress-gateway.nomad", jobs3.Timeout(time.Second*60))
|
||||
t.Cleanup(cleanup)
|
||||
}
|
||||
|
||||
// testConnectMultiIngressGateway tests a job with multiple ingress gateways
|
||||
func testConnectMultiIngressGateway(t *testing.T) {
|
||||
_, cleanup := jobs3.Submit(t, "./input/multi-ingress.nomad", jobs3.Timeout(time.Second*60))
|
||||
t.Cleanup(cleanup)
|
||||
}
|
||||
|
||||
// testConnectTerminatingGateway tests a job with a terminating gateway
|
||||
func testConnectTerminatingGateway(t *testing.T) {
|
||||
_, cleanup := jobs3.Submit(t, "./input/terminating-gateway.nomad", jobs3.Timeout(time.Second*60))
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
cc := e2eutil.ConsulClient(t)
|
||||
|
||||
ixn := &capi.Intention{
|
||||
SourceName: "count-dashboard",
|
||||
DestinationName: "count-api",
|
||||
Action: "allow",
|
||||
}
|
||||
_, err := cc.Connect().IntentionUpsert(ixn, nil)
|
||||
must.NoError(t, err, must.Sprint("could not create intention"))
|
||||
|
||||
t.Cleanup(func() {
|
||||
_, err := cc.Connect().IntentionDeleteExact("count-dashboard", "count-api", nil)
|
||||
test.NoError(t, err)
|
||||
})
|
||||
|
||||
assertServiceOk(t, cc, "api-gateway")
|
||||
assertServiceOk(t, cc, "count-dashboard-sidecar-proxy")
|
||||
assertServiceOk(t, cc, "count-api")
|
||||
}
|
||||
|
||||
// testConnectMultiService tests a job with multiple Connect blocks in the same
|
||||
// group
|
||||
func testConnectMultiService(t *testing.T) {
|
||||
_, cleanup := jobs3.Submit(t, "./input/multi-service.nomad", jobs3.Timeout(time.Second*60))
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
cc := e2eutil.ConsulClient(t)
|
||||
assertServiceOk(t, cc, "echo1-sidecar-proxy")
|
||||
assertServiceOk(t, cc, "echo2-sidecar-proxy")
|
||||
}
|
||||
|
||||
// assertServiceOk is a test helper to assert a service is passing health checks, if any
|
||||
func assertServiceOk(t *testing.T, cc *capi.Client, name string) {
|
||||
t.Helper()
|
||||
services, _, err := cc.Health().Service(name, "", false, nil)
|
||||
must.NoError(t, err)
|
||||
must.Greater(t, 0, len(services), must.Sprintf("found no services for %q", name))
|
||||
|
||||
status := services[0].Checks.AggregatedStatus()
|
||||
must.Eq(t, "passing", status)
|
||||
}
|
||||
5
e2e/connect/doc.go
Normal file
5
e2e/connect/doc.go
Normal file
@@ -0,0 +1,5 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
// Package connect provides tests for Nomad's Consul Connect integration.
|
||||
package connect
|
||||
@@ -1,143 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package connect
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
consulapi "github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/nomad/api"
|
||||
"github.com/hashicorp/nomad/e2e/framework"
|
||||
"github.com/hashicorp/nomad/helper/uuid"
|
||||
"github.com/hashicorp/nomad/jobspec"
|
||||
"github.com/hashicorp/nomad/testutil"
|
||||
"github.com/kr/pretty"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestMultiServiceConnect tests running multiple envoy sidecars in the same allocation.
|
||||
func (tc *ConnectE2ETest) TestMultiServiceConnect(f *framework.F) {
|
||||
t := f.T()
|
||||
uuid := uuid.Generate()
|
||||
jobID := "connect" + uuid[0:8]
|
||||
tc.jobIds = append(tc.jobIds, jobID)
|
||||
jobapi := tc.Nomad().Jobs()
|
||||
|
||||
job, err := jobspec.ParseFile("connect/input/multi-service.nomad")
|
||||
require.NoError(t, err)
|
||||
job.ID = &jobID
|
||||
|
||||
resp, _, err := jobapi.Register(job, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
require.Zero(t, resp.Warnings)
|
||||
|
||||
EVAL:
|
||||
qopts := &api.QueryOptions{
|
||||
WaitIndex: resp.EvalCreateIndex,
|
||||
}
|
||||
evalapi := tc.Nomad().Evaluations()
|
||||
eval, qmeta, err := evalapi.Info(resp.EvalID, qopts)
|
||||
require.NoError(t, err)
|
||||
qopts.WaitIndex = qmeta.LastIndex
|
||||
|
||||
switch eval.Status {
|
||||
case "pending":
|
||||
goto EVAL
|
||||
case "complete":
|
||||
// Ok!
|
||||
case "failed", "canceled", "blocked":
|
||||
require.Failf(t, "expected complete status", "eval %s\n%s", eval.Status, pretty.Sprint(eval))
|
||||
default:
|
||||
require.Failf(t, "expected complete status", "unknown eval status: %s\n%s", eval.Status, pretty.Sprint(eval))
|
||||
}
|
||||
|
||||
// Assert there were 0 placement failures
|
||||
require.Zero(t, eval.FailedTGAllocs, pretty.Sprint(eval.FailedTGAllocs))
|
||||
require.Len(t, eval.QueuedAllocations, 1, pretty.Sprint(eval.QueuedAllocations))
|
||||
|
||||
// Assert allocs are running
|
||||
for i := 0; i < 20; i++ {
|
||||
allocs, qmeta, err := evalapi.Allocations(eval.ID, qopts)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, allocs, 1)
|
||||
qopts.WaitIndex = qmeta.LastIndex
|
||||
|
||||
running := 0
|
||||
for _, alloc := range allocs {
|
||||
switch alloc.ClientStatus {
|
||||
case "running":
|
||||
running++
|
||||
case "pending":
|
||||
// keep trying
|
||||
default:
|
||||
require.Failf(t, "alloc failed", "alloc: %s", pretty.Sprint(alloc))
|
||||
}
|
||||
}
|
||||
|
||||
if running == len(allocs) {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
|
||||
allocs, _, err := evalapi.Allocations(eval.ID, qopts)
|
||||
require.NoError(t, err)
|
||||
allocIDs := make(map[string]bool, 1)
|
||||
for _, a := range allocs {
|
||||
if a.ClientStatus != "running" || a.DesiredStatus != "run" {
|
||||
require.Failf(t, "expected running status", "alloc %s (%s) terminal; client=%s desired=%s", a.TaskGroup, a.ID, a.ClientStatus, a.DesiredStatus)
|
||||
}
|
||||
allocIDs[a.ID] = true
|
||||
}
|
||||
|
||||
// Check Consul service health
|
||||
agentapi := tc.Consul().Agent()
|
||||
|
||||
failing := map[string]*consulapi.AgentCheck{}
|
||||
testutil.WaitForResultRetries(60, func() (bool, error) {
|
||||
defer time.Sleep(time.Second)
|
||||
|
||||
checks, err := agentapi.Checks()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Filter out checks for other services
|
||||
for cid, check := range checks {
|
||||
found := false
|
||||
for allocID := range allocIDs {
|
||||
if strings.Contains(check.ServiceID, allocID) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
delete(checks, cid)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure checks are all passing
|
||||
failing = map[string]*consulapi.AgentCheck{}
|
||||
for _, check := range checks {
|
||||
if check.Status != "passing" {
|
||||
failing[check.CheckID] = check
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(failing) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
t.Logf("still %d checks not passing", len(failing))
|
||||
return false, fmt.Errorf("checks are not passing %v %v", len(failing), pretty.Sprint(failing))
|
||||
}, func(e error) {
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
require.Len(t, failing, 0, pretty.Sprint(failing))
|
||||
}
|
||||
@@ -13,38 +13,38 @@ import (
|
||||
|
||||
_ "github.com/hashicorp/nomad/e2e/affinities"
|
||||
_ "github.com/hashicorp/nomad/e2e/clientstate"
|
||||
_ "github.com/hashicorp/nomad/e2e/connect"
|
||||
_ "github.com/hashicorp/nomad/e2e/consul"
|
||||
_ "github.com/hashicorp/nomad/e2e/csi"
|
||||
_ "github.com/hashicorp/nomad/e2e/deployment"
|
||||
_ "github.com/hashicorp/nomad/e2e/eval_priority"
|
||||
_ "github.com/hashicorp/nomad/e2e/events"
|
||||
_ "github.com/hashicorp/nomad/e2e/isolation"
|
||||
_ "github.com/hashicorp/nomad/e2e/lifecycle"
|
||||
_ "github.com/hashicorp/nomad/e2e/metrics"
|
||||
_ "github.com/hashicorp/nomad/e2e/networking"
|
||||
_ "github.com/hashicorp/nomad/e2e/nomadexec"
|
||||
_ "github.com/hashicorp/nomad/e2e/oversubscription"
|
||||
_ "github.com/hashicorp/nomad/e2e/parameterized"
|
||||
_ "github.com/hashicorp/nomad/e2e/periodic"
|
||||
_ "github.com/hashicorp/nomad/e2e/podman"
|
||||
_ "github.com/hashicorp/nomad/e2e/quotas"
|
||||
_ "github.com/hashicorp/nomad/e2e/remotetasks"
|
||||
_ "github.com/hashicorp/nomad/e2e/scaling"
|
||||
_ "github.com/hashicorp/nomad/e2e/scalingpolicies"
|
||||
_ "github.com/hashicorp/nomad/e2e/scheduler_sysbatch"
|
||||
_ "github.com/hashicorp/nomad/e2e/scheduler_system"
|
||||
_ "github.com/hashicorp/nomad/e2e/spread"
|
||||
_ "github.com/hashicorp/nomad/e2e/taskevents"
|
||||
_ "github.com/hashicorp/nomad/e2e/vaultsecrets"
|
||||
|
||||
// these are no longer on the old framework but by importing them
|
||||
// we get a quick check that they compile on every commit
|
||||
_ "github.com/hashicorp/nomad/e2e/connect"
|
||||
_ "github.com/hashicorp/nomad/e2e/consultemplate"
|
||||
_ "github.com/hashicorp/nomad/e2e/disconnectedclients"
|
||||
_ "github.com/hashicorp/nomad/e2e/isolation"
|
||||
_ "github.com/hashicorp/nomad/e2e/metrics"
|
||||
_ "github.com/hashicorp/nomad/e2e/namespaces"
|
||||
_ "github.com/hashicorp/nomad/e2e/nodedrain"
|
||||
_ "github.com/hashicorp/nomad/e2e/nomadexec"
|
||||
_ "github.com/hashicorp/nomad/e2e/oversubscription"
|
||||
_ "github.com/hashicorp/nomad/e2e/podman"
|
||||
_ "github.com/hashicorp/nomad/e2e/rescheduling"
|
||||
_ "github.com/hashicorp/nomad/e2e/spread"
|
||||
_ "github.com/hashicorp/nomad/e2e/vaultsecrets"
|
||||
_ "github.com/hashicorp/nomad/e2e/volumes"
|
||||
)
|
||||
|
||||
|
||||
@@ -47,6 +47,8 @@ type Submission struct {
|
||||
waitComplete *set.Set[string] // groups to wait until complete
|
||||
inNamespace string
|
||||
authToken string
|
||||
|
||||
legacyConsulToken string
|
||||
}
|
||||
|
||||
func (sub *Submission) queryOptions() *nomadapi.QueryOptions {
|
||||
@@ -299,6 +301,9 @@ func (sub *Submission) run() {
|
||||
if job.Type == nil {
|
||||
job.Type = pointer.Of("service")
|
||||
}
|
||||
if sub.legacyConsulToken != "" {
|
||||
job.ConsulToken = pointer.Of(sub.legacyConsulToken)
|
||||
}
|
||||
|
||||
writeOpts := &nomadapi.WriteOptions{
|
||||
Namespace: sub.inNamespace,
|
||||
@@ -627,3 +632,9 @@ func SkipEvalComplete() Option {
|
||||
func SkipDeploymentHealthy() Option {
|
||||
panic("not yet implemented")
|
||||
}
|
||||
|
||||
func LegacyConsulToken(token string) Option {
|
||||
return func(c *Submission) {
|
||||
c.legacyConsulToken = token
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user