e2e: create a v3/ set of packages for creating Nomad e2e tests (#17620)

* e2e: create a v3/ set of packages for creating Nomad e2e tests

This PR creates an experimental set of packages under `e2e/v3/` for crafting
Nomad e2e tests. Unlike previous generations, this is an attempt at providing
a way to create tests in a declarative (ish) pattern, with a focus on being
easy to use, easy to clean up, and easy to debug.

@shoenig is just trying this out to see how it goes.

Lots of features need to be implemented.
Many more docs need to be written.
Breaking changes are to be expected.
There are known and unknown bugs.
No warranty.

Quick run of `example` with verbose logging.

```shell
➜ NOMAD_E2E_VERBOSE=1 go test -v
=== RUN   TestExample
=== RUN   TestExample/testSleep
    util3.go:25: register (service) job: "sleep-809"
    util3.go:25: checking eval: 9f0ae04d-7259-9333-3763-44d0592d03a1, status: pending
    util3.go:25: checking eval: 9f0ae04d-7259-9333-3763-44d0592d03a1, status: complete
    util3.go:25: checking deployment: a85ad2f8-269c-6620-d390-8eac7a9c397d, status: running
    util3.go:25: checking deployment: a85ad2f8-269c-6620-d390-8eac7a9c397d, status: running
    util3.go:25: checking deployment: a85ad2f8-269c-6620-d390-8eac7a9c397d, status: running
    util3.go:25: checking deployment: a85ad2f8-269c-6620-d390-8eac7a9c397d, status: running
    util3.go:25: checking deployment: a85ad2f8-269c-6620-d390-8eac7a9c397d, status: successful
    util3.go:25: deployment a85ad2f8-269c-6620-d390-8eac7a9c397d was a success
    util3.go:25: deregister job "sleep-809"
    util3.go:25: system gc
=== RUN   TestExample/testNamespace
    util3.go:25: apply namespace "example-291"
    util3.go:25: register (service) job: "sleep-967"
    util3.go:25: checking eval: a2a2303a-adf1-2621-042e-a9654292e569, status: pending
    util3.go:25: checking eval: a2a2303a-adf1-2621-042e-a9654292e569, status: complete
    util3.go:25: checking deployment: 3395e9a8-3ffc-8990-d5b8-cc0ce311f302, status: running
    util3.go:25: checking deployment: 3395e9a8-3ffc-8990-d5b8-cc0ce311f302, status: running
    util3.go:25: checking deployment: 3395e9a8-3ffc-8990-d5b8-cc0ce311f302, status: running
    util3.go:25: checking deployment: 3395e9a8-3ffc-8990-d5b8-cc0ce311f302, status: successful
    util3.go:25: deployment 3395e9a8-3ffc-8990-d5b8-cc0ce311f302 was a success
    util3.go:25: deregister job "sleep-967"
    util3.go:25: system gc
    util3.go:25: cleanup namespace "example-291"
=== RUN   TestExample/testEnv
    util3.go:25: register (batch) job: "env-582"
    util3.go:25: checking eval: 600f3bce-ea17-6d13-9d20-9d9eb2a784f7, status: pending
    util3.go:25: checking eval: 600f3bce-ea17-6d13-9d20-9d9eb2a784f7, status: complete
    util3.go:25: deregister job "env-582"
    util3.go:25: system gc
--- PASS: TestExample (10.08s)
    --- PASS: TestExample/testSleep (5.02s)
    --- PASS: TestExample/testNamespace (4.02s)
    --- PASS: TestExample/testEnv (1.03s)
PASS
ok      github.com/hashicorp/nomad/e2e/example  10.079s
```

* cluster3: use filter for kernel.name instead of filtering manually
Seth Hoenig committed on 2023-06-23 09:10:49 -05:00 (committed via GitHub)
commit 5b5fbc0881, parent 6f7dc6a860
10 changed files with 999 additions and 41 deletions

@@ -1,7 +1,5 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Package example contains basic examples of writing e2e tests for Nomad.
package example
// This package contains only tests, so this is a placeholder file to
// make sure builds don't fail with "no non-test Go files in" errors

@@ -4,54 +4,49 @@
package example
import (
"os"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/hashicorp/nomad/e2e/e2eutil"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/e2e/v3/cluster3"
"github.com/hashicorp/nomad/e2e/v3/jobs3"
"github.com/hashicorp/nomad/e2e/v3/namespaces3"
"github.com/hashicorp/nomad/e2e/v3/util3"
"github.com/shoenig/test/must"
)
func TestExample(t *testing.T) {
nomad := e2eutil.NomadClient(t)
cluster3.Establish(t,
cluster3.Leader(),
cluster3.LinuxClients(1),
cluster3.Timeout(3*time.Second),
)
e2eutil.WaitForLeader(t, nomad)
e2eutil.WaitForNodesReady(t, nomad, 2)
t.Run("TestExample_Simple", testExample_Simple)
t.Run("TestExample_WithCleanup", testExample_WithCleanup)
t.Run("testSleep", testSleep)
t.Run("testNamespace", testNamespace)
t.Run("testEnv", testEnv)
}
func testExample_Simple(t *testing.T) {
t.Logf("Logging %s", t.Name())
out, err := e2eutil.Command("nomad", "node", "status")
require.NoError(t, err, "failed to run `nomad node status`")
rows, err := e2eutil.ParseColumns(out)
require.NoError(t, err, "failed to parse `nomad node status`")
for _, row := range rows {
require.Equal(t, "ready", row["Status"])
}
func testSleep(t *testing.T) {
_, cleanup := jobs3.Submit(t, "./input/sleep.hcl")
t.Cleanup(cleanup)
}
func testExample_WithCleanup(t *testing.T) {
func testNamespace(t *testing.T) {
name := util3.ShortID("example")
t.Logf("Logging %s", t.Name())
nomad := e2eutil.NomadClient(t)
nsCleanup := namespaces3.Create(t, name)
t.Cleanup(nsCleanup)
_, err := e2eutil.Command("nomad", "job", "init", "-short", "./input/example.nomad")
require.NoError(t, err, "failed to run `nomad job init -short`")
t.Cleanup(func() { os.Remove("input/example.nomad") })
jobIDs := []string{}
t.Cleanup(e2eutil.CleanupJobsAndGC(t, &jobIDs))
jobID := "example-" + uuid.Short()
jobIDs = append(jobIDs, jobID)
e2eutil.RegisterAndWaitForAllocs(t, nomad, "./input/example.nomad", jobID, "")
jobs, _, err := nomad.Jobs().List(nil)
require.NoError(t, err)
require.NotEmpty(t, jobs)
_, jobCleanup := jobs3.Submit(t, "./input/sleep.hcl", jobs3.Namespace(name))
t.Cleanup(jobCleanup)
}
func testEnv(t *testing.T) {
job, cleanup := jobs3.Submit(t, "./input/env.hcl", jobs3.WaitComplete("group"))
t.Cleanup(cleanup)
expect := fmt.Sprintf("NOMAD_JOB_ID=%s", job.JobID())
logs := job.TaskLogs("group", "task")
must.StrContains(t, logs.Stdout, expect)
}

e2e/example/input/env.hcl (new file)

@@ -0,0 +1,39 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
# This "env" job simply invokes 'env' using raw_exec.
job "env" {
type = "batch"
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
group "group" {
reschedule {
attempts = 0
unlimited = false
}
restart {
attempts = 0
mode = "fail"
}
task "task" {
user = "nobody"
driver = "raw_exec"
config {
command = "env"
}
resources {
cpu = 10
memory = 10
}
}
}
}

e2e/example/input/sleep.hcl (new file)

@@ -0,0 +1,45 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
# This "sleep" job simply invokes 'sleep infinity' using raw_exec. It is great
# for demonstrating features of the Nomad e2e suite with a trivial job spec.
job "sleep" {
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
group "group" {
update {
min_healthy_time = "2s"
}
reschedule {
attempts = 0
unlimited = false
}
restart {
attempts = 0
mode = "fail"
}
task "task" {
user = "nobody"
driver = "raw_exec"
config {
command = "sleep"
args = ["infinity"]
}
resources {
cpu = 10
memory = 10
}
}
}
}

e2e/v3/README.md (new file)

@@ -0,0 +1,37 @@
# e2e | v3
(!) These packages are experimental and breaking changes will be made. Also,
expect bugs. Like a lot of bugs. Especially on non-happy paths.
The `e2e/v3/` set of packages provides utilities for creating Nomad e2e tests in
a way that is convenient, reliable, and debuggable.
- `v3/cluster3` - establish and verify the state of the cluster
- `v3/jobs3` - manage nomad jobs and wait for deployments, etc.
- `v3/namespaces3` - manage nomad namespaces
- `v3/util3` - helper methods specific to the `v3` utilities
## Examples
#### simple
The simplest example, where we expect a cluster with a leader and at least one
Linux client in a ready state. The test case submits the `sleep.hcl` job, waits
for the deployment to become successful (or fail, or time out), then cleans up
the job.
```go
func TestExample(t *testing.T) {
	cluster3.Establish(t,
		cluster3.Leader(),
		cluster3.LinuxClients(1),
	)

	t.Run("testSleep", testSleep)
}

func testSleep(t *testing.T) {
	_, cleanup := jobs3.Submit(t, "./input/sleep.hcl")
	t.Cleanup(cleanup)
}
```
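
#### namespace

A slightly larger sketch, mirroring `testNamespace` from the example package in
this PR: create a namespace with a randomized name, then submit a job into it.
Both helpers return cleanup functions that can be handed straight to
`t.Cleanup`.

```go
func testNamespace(t *testing.T) {
	name := util3.ShortID("example")

	nsCleanup := namespaces3.Create(t, name)
	t.Cleanup(nsCleanup)

	_, jobCleanup := jobs3.Submit(t, "./input/sleep.hcl", jobs3.Namespace(name))
	t.Cleanup(jobCleanup)
}
```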

e2e/v3/cluster3/cluster3.go (new file)

@@ -0,0 +1,218 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package cluster3
import (
"errors"
"fmt"
"os"
"testing"
"time"
consulapi "github.com/hashicorp/consul/api"
nomadapi "github.com/hashicorp/nomad/api"
vaultapi "github.com/hashicorp/vault/api"
"github.com/shoenig/test/must"
"github.com/shoenig/test/wait"
)
type Cluster struct {
t *testing.T
consulClient *consulapi.Client
nomadClient *nomadapi.Client
vaultClient *vaultapi.Client
timeout time.Duration
leaderReady bool
consulReady bool
vaultReady bool
linuxClients int
windowsClients int
}
func (c *Cluster) wait() {
errCh := make(chan error)
statusAPI := c.nomadClient.Status()
nodesAPI := c.nomadClient.Nodes()
consulStatusAPI := c.consulClient.Status()
vaultSysAPI := c.vaultClient.Sys()
waitLeader := wait.InitialSuccess(
wait.Timeout(c.timeout),
wait.Gap(1*time.Second),
wait.TestFunc(func() (bool, error) {
if !c.leaderReady {
return true, nil
}
result, err := statusAPI.Leader()
return result != "", err
}),
)
waitLinuxClients := wait.InitialSuccess(
wait.Timeout(c.timeout),
wait.Gap(1*time.Second),
wait.ErrorFunc(func() error {
if c.linuxClients <= 0 {
return nil
}
queryOpts := &nomadapi.QueryOptions{
Filter: `Attributes["kernel.name"] == "linux"`,
}
nodes, _, err := nodesAPI.List(queryOpts)
if err != nil {
return err
}
eligible := len(nodes)
if eligible < c.linuxClients {
return fmt.Errorf("not enough linux clients, want %d, got %d", c.linuxClients, eligible)
}
return nil
}),
)
waitWindowsClients := wait.InitialSuccess(
wait.Timeout(c.timeout),
wait.Gap(1*time.Second),
wait.ErrorFunc(func() error {
if c.windowsClients <= 0 {
return nil
}
return errors.New("todo: windows")
}),
)
waitConsul := wait.InitialSuccess(
wait.Timeout(c.timeout),
wait.Gap(1*time.Second),
wait.TestFunc(func() (bool, error) {
if !c.consulReady {
return true, nil
}
result, err := consulStatusAPI.Leader()
return result != "", err
}),
)
waitVault := wait.InitialSuccess(
wait.Timeout(c.timeout),
wait.Gap(1*time.Second),
wait.TestFunc(func() (bool, error) {
if !c.vaultReady {
return true, nil
}
result, err := vaultSysAPI.Leader()
if err != nil {
return false, fmt.Errorf("failed to find vault leader: %w", err)
}
if result == nil {
return false, errors.New("empty response for vault leader")
}
return result.ActiveTime.String() != "", nil
}),
)
// todo: generalize
go func() {
err := waitLeader.Run()
errCh <- err
}()
go func() {
err := waitLinuxClients.Run()
errCh <- err
}()
go func() {
err := waitWindowsClients.Run()
errCh <- err
}()
go func() {
err := waitConsul.Run()
errCh <- err
}()
go func() {
err := waitVault.Run()
errCh <- err
}()
for i := 0; i < 5; i++ {
err := <-errCh
must.NoError(c.t, err)
}
}
type Option func(c *Cluster)
func Establish(t *testing.T, opts ...Option) {
c := &Cluster{
t: t,
timeout: 10 * time.Second,
}
for _, opt := range opts {
opt(c)
}
c.setClients()
c.wait()
}
func (c *Cluster) setClients() {
nomadClient, nomadErr := nomadapi.NewClient(nomadapi.DefaultConfig())
must.NoError(c.t, nomadErr, must.Sprint("failed to create nomad api client"))
c.nomadClient = nomadClient
consulClient, consulErr := consulapi.NewClient(consulapi.DefaultConfig())
must.NoError(c.t, consulErr, must.Sprint("failed to create consul api client"))
c.consulClient = consulClient
vConfig := vaultapi.DefaultConfig()
if os.Getenv("VAULT_ADDR") == "" {
vConfig.Address = "http://localhost:8200"
}
vaultClient, vaultErr := vaultapi.NewClient(vConfig)
must.NoError(c.t, vaultErr, must.Sprint("failed to create vault api client"))
c.vaultClient = vaultClient
}
func Timeout(timeout time.Duration) Option {
return func(c *Cluster) {
c.timeout = timeout
}
}
func LinuxClients(count int) Option {
return func(c *Cluster) {
c.linuxClients = count
}
}
func WindowsClients(count int) Option {
panic("not yet implemented")
// return func(c *Cluster) {
// c.windowsClients = count
// }
}
func Leader() Option {
return func(c *Cluster) {
c.leaderReady = true
}
}
func Consul() Option {
return func(c *Cluster) {
c.consulReady = true
}
}
func Vault() Option {
return func(c *Cluster) {
c.vaultReady = true
}
}
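
As a usage sketch (for illustration, not part of the diff): gating a test on
cluster state with `Establish`. The test name is hypothetical, and `Consul()`
and `Vault()` should only be passed when those services are expected to be
running.

```go
package example

import (
	"testing"
	"time"

	"github.com/hashicorp/nomad/e2e/v3/cluster3"
)

func TestWithConsulAndVault(t *testing.T) {
	// Block until the cluster looks the way the test expects, failing the
	// test if any check does not pass within the timeout.
	cluster3.Establish(t,
		cluster3.Leader(),                // a Nomad leader is elected
		cluster3.LinuxClients(1),         // at least one Linux client is ready
		cluster3.Consul(),                // a Consul leader is reachable
		cluster3.Vault(),                 // an active Vault is reachable
		cluster3.Timeout(20*time.Second), // default is 10 seconds
	)

	// subtests go here
}
```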

e2e/v3/jobs3/jobs3.go (new file)

@@ -0,0 +1,433 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package jobs3
import (
"context"
"fmt"
"io"
"math/rand"
"os"
"regexp"
"testing"
"time"
"github.com/hashicorp/go-set"
nomadapi "github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/e2e/v3/util3"
"github.com/hashicorp/nomad/helper/pointer"
"github.com/hashicorp/nomad/jobspec2"
"github.com/shoenig/test"
"github.com/shoenig/test/must"
)
type Submission struct {
t *testing.T
nomadClient *nomadapi.Client
jobSpec string
jobID string
origJobID string
noRandomJobID bool
noCleanup bool
timeout time.Duration
verbose bool
vars *set.Set[string] // key=value
waitComplete *set.Set[string] // groups to wait until complete
inNamespace string
authToken string
}
func (sub *Submission) queryOptions() *nomadapi.QueryOptions {
return &nomadapi.QueryOptions{
Namespace: sub.inNamespace,
AuthToken: sub.authToken,
}
}
type Logs struct {
Stdout string
Stderr string
}
// TaskLogs returns the logs of the given task, using a random allocation of
// the given group.
func (sub *Submission) TaskLogs(group, task string) Logs {
byAlloc := sub.TaskLogsByAlloc(group, task)
must.Positive(sub.t, len(byAlloc), must.Sprintf("no allocations found for %s/%s", group, task))
var result Logs
for _, logs := range byAlloc {
result = logs
break
}
return result
}
// TaskLogsByAlloc returns the logs of the given task, organized by allocation.
func (sub *Submission) TaskLogsByAlloc(group, task string) map[string]Logs {
result := make(map[string]Logs)
// get list of allocs for the job
queryOpts := sub.queryOptions()
jobsAPI := sub.nomadClient.Jobs()
stubs, _, err := jobsAPI.Allocations(sub.jobID, false, queryOpts)
must.NoError(sub.t, err, must.Sprintf("failed to query allocations for %s/%s", group, task))
// get logs for each task in the group allocations
for _, stub := range stubs {
if stub.TaskGroup == group {
result[stub.ID] = sub.getTaskLogs(stub.ID, task)
}
}
return result
}
func (sub *Submission) getTaskLogs(allocID, task string) Logs {
queryOpts := sub.queryOptions()
allocAPI := sub.nomadClient.Allocations()
alloc, _, err := allocAPI.Info(allocID, queryOpts)
must.NoError(sub.t, err, must.Sprintf("failed to query allocation for %s", allocID))
fsAPI := sub.nomadClient.AllocFS()
read := func(path string) string {
rc, err := fsAPI.ReadAt(alloc, path, 0, 0, queryOpts)
must.NoError(sub.t, err, must.Sprintf("failed to read alloc logs for %s", allocID))
b, err := io.ReadAll(rc)
must.NoError(sub.t, err, must.Sprintf("failed to read alloc logs for %s", allocID))
must.NoError(sub.t, rc.Close(), must.Sprint("failed to close log stream"))
return string(b)
}
stdout := fmt.Sprintf("alloc/logs/%s.stdout.0", task)
stderr := fmt.Sprintf("alloc/logs/%s.stderr.0", task)
return Logs{
Stdout: read(stdout),
Stderr: read(stderr),
}
}
// JobID provides the (possibly) randomized jobID associated with this Submission.
func (sub *Submission) JobID() string {
return sub.jobID
}
func (sub *Submission) logf(msg string, args ...any) {
sub.t.Helper()
util3.Log3(sub.t, sub.verbose, msg, args...)
}
func (sub *Submission) cleanup() {
if sub.noCleanup {
return
}
// deregister the job that was submitted
jobsAPI := sub.nomadClient.Jobs()
sub.logf("deregister job %q", sub.jobID)
_, _, err := jobsAPI.Deregister(sub.jobID, true, &nomadapi.WriteOptions{
Namespace: sub.inNamespace,
})
test.NoError(sub.t, err, test.Sprintf("failed to deregister job %q", sub.origJobID))
// force a system gc just in case
sysAPI := sub.nomadClient.System()
sub.logf("system gc")
err = sysAPI.GarbageCollect()
test.NoError(sub.t, err, test.Sprint("failed to gc"))
// todo: should probably loop over the gc until the job is actually gone
}
type Option func(*Submission)
type Cleanup func()
func Submit(t *testing.T, filename string, opts ...Option) (*Submission, Cleanup) {
sub := initialize(t, filename)
for _, opt := range opts {
opt(sub)
}
sub.setClient() // setup base api clients
sub.run() // submit job and wait on deployment
sub.waits() // wait on batch/sysbatch allocations
return sub, sub.cleanup
}
func Namespace(name string) Option {
return func(sub *Submission) {
sub.inNamespace = name
}
}
func AuthToken(token string) Option {
return func(sub *Submission) {
sub.authToken = token
}
}
var (
idRe = regexp.MustCompile(`(?m)^job "(.*)" \{`)
)
func (sub *Submission) run() {
if !sub.noRandomJobID {
sub.jobID = fmt.Sprintf("%s-%03d", sub.origJobID, rand.Int()%1000)
sub.jobSpec = idRe.ReplaceAllString(sub.jobSpec, fmt.Sprintf("job %q {", sub.jobID))
}
parseConfig := &jobspec2.ParseConfig{
// Path
Body: []byte(sub.jobSpec),
AllowFS: true,
ArgVars: sub.vars.Slice(),
// VarFiles
// VarContent
// Envs
// Strict
}
job, err := jobspec2.ParseWithConfig(parseConfig)
must.NoError(sub.t, err, must.Sprint("failed to parse job"))
must.NotNil(sub.t, job)
if job.Type == nil {
job.Type = pointer.Of("service")
}
writeOpts := &nomadapi.WriteOptions{
Namespace: sub.inNamespace,
AuthToken: sub.authToken,
}
jobsAPI := sub.nomadClient.Jobs()
sub.logf("register (%s) job: %q", *job.Type, sub.jobID)
regResp, _, err := jobsAPI.Register(job, writeOpts)
must.NoError(sub.t, err)
evalID := regResp.EvalID
queryOpts := &nomadapi.QueryOptions{
Namespace: sub.inNamespace,
AuthToken: sub.authToken,
}
// setup a context with our submission timeout
ctx, cancel := context.WithTimeout(context.Background(), sub.timeout)
defer cancel()
// we need to go through evals until we find the deployment
evalAPI := sub.nomadClient.Evaluations()
// start eval lookup loop
var deploymentID string
EVAL:
for {
// check if we have passed timeout expiration
select {
case <-ctx.Done():
must.Unreachable(sub.t, must.Sprint("timeout reached waiting for eval"))
default:
}
eval, _, err := evalAPI.Info(evalID, queryOpts)
must.NoError(sub.t, err)
sub.logf("checking eval: %s, status: %s", evalID, eval.Status)
switch eval.Status {
case nomadapi.EvalStatusComplete:
deploymentID = eval.DeploymentID
break EVAL
case nomadapi.EvalStatusFailed:
must.Unreachable(sub.t, must.Sprint("eval failed"))
case nomadapi.EvalStatusCancelled:
must.Unreachable(sub.t, must.Sprint("eval cancelled"))
default:
time.Sleep(1 * time.Second)
}
nextEvalID := eval.NextEval
if nextEvalID != "" {
evalID = nextEvalID
continue
}
}
switch *job.Type {
case "service":
// need to monitor the deployment until it is complete
depAPI := sub.nomadClient.Deployments()
DEPLOY:
for {
// check if we have passed timeout expiration
select {
case <-ctx.Done():
must.Unreachable(sub.t, must.Sprint("timeout reached waiting for deployment"))
default:
}
dep, _, err := depAPI.Info(deploymentID, queryOpts)
must.NoError(sub.t, err)
sub.logf("checking deployment: %s, status: %s", dep.ID, dep.Status)
switch dep.Status {
case nomadapi.DeploymentStatusBlocked:
must.Unreachable(sub.t, must.Sprint("deployment is blocked"))
case nomadapi.DeploymentStatusCancelled:
must.Unreachable(sub.t, must.Sprint("deployment is cancelled"))
case nomadapi.DeploymentStatusFailed:
must.Unreachable(sub.t, must.Sprint("deployment is failed"))
case nomadapi.DeploymentStatusPaused:
must.Unreachable(sub.t, must.Sprint("deployment is paused"))
case nomadapi.DeploymentStatusPending:
break
case nomadapi.DeploymentStatusRunning:
break
case nomadapi.DeploymentStatusSuccessful:
sub.logf("deployment %s was a success", dep.ID)
break DEPLOY
case nomadapi.DeploymentStatusUnblocking:
must.Unreachable(sub.t, must.Sprint("deployment is unblocking"))
default:
break
}
time.Sleep(1 * time.Second)
}
// todo: more job types
default:
}
}
func (sub *Submission) waitAlloc(group, id string) {
queryOpts := sub.queryOptions()
allocAPI := sub.nomadClient.Allocations()
// todo: respect timeout
ALLOCATION:
for {
latest, _, err := allocAPI.Info(id, queryOpts)
must.NoError(sub.t, err)
status := latest.ClientStatus
sub.logf("wait for %q allocation %s, status: %s", group, id, status)
switch status {
case nomadapi.AllocClientStatusLost:
must.Unreachable(sub.t, must.Sprintf("group %q allocation %s lost", group, id))
case nomadapi.AllocClientStatusFailed:
must.Unreachable(sub.t, must.Sprintf("group %q allocation %s failed", group, id))
case nomadapi.AllocClientStatusPending:
break
case nomadapi.AllocClientStatusRunning:
break
case nomadapi.AllocClientStatusComplete:
break ALLOCATION
}
time.Sleep(1 * time.Second)
}
}
func (sub *Submission) waits() {
queryOpts := sub.queryOptions()
jobsAPI := sub.nomadClient.Jobs()
allocations, _, err := jobsAPI.Allocations(sub.jobID, false, queryOpts)
must.NoError(sub.t, err)
// for each alloc, if this is an alloc we want to wait on, wait on it
for _, alloc := range allocations {
id := alloc.ID
group := alloc.TaskGroup
if sub.waitComplete.Contains(group) {
sub.waitAlloc(group, id)
}
}
}
func (sub *Submission) setClient() {
nomadClient, nomadErr := nomadapi.NewClient(nomadapi.DefaultConfig())
must.NoError(sub.t, nomadErr, must.Sprint("failed to create nomad api client"))
sub.nomadClient = nomadClient
}
func initialize(t *testing.T, filename string) *Submission {
b, err := os.ReadFile(filename)
must.NoError(t, err, must.Sprintf("failed to read job file %q", filename))
job := string(b)
jobID := idRe.FindStringSubmatch(job)[1]
must.NotEq(t, "", jobID, must.Sprintf("could not find job id in %q", filename))
return &Submission{
t: t,
jobSpec: job,
jobID: jobID,
origJobID: jobID,
timeout: 20 * time.Second,
vars: set.New[string](0),
waitComplete: set.New[string](0),
}
}
func DisableRandomJobID() Option {
return func(sub *Submission) {
sub.noRandomJobID = true
}
}
func DisableCleanup() Option {
return func(sub *Submission) {
sub.noCleanup = true
}
}
func Timeout(timeout time.Duration) Option {
return func(c *Submission) {
c.timeout = timeout
}
}
// Verbose will turn on verbose logging.
func Verbose(on bool) Option {
return func(c *Submission) {
c.verbose = on
}
}
// Set an HCL variable.
func Var(key, value string) Option {
return func(sub *Submission) {
sub.vars.Insert(fmt.Sprintf("%s=%s", key, value))
}
}
// WaitComplete will wait until all allocations of the given group are
// in the "complete" state (or timeout, or terminal with another status).
func WaitComplete(group string) Option {
return func(sub *Submission) {
sub.waitComplete.Insert(group)
}
}
// SkipEvalComplete will skip waiting for the evaluation(s) to be complete.
//
// Implies SkipDeploymentHealthy.
func SkipEvalComplete() Option {
panic("not yet implemented")
}
// SkipDeploymentHealthy will skip waiting for the deployment to become
// healthy.
func SkipDeploymentHealthy() Option {
panic("not yet implemented")
}
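
As a usage sketch (for illustration, not part of the diff): combining a few of
the options above, modeled on `testEnv` from the example package. The group and
task names match the `env.hcl` spec added in this PR; the test name is
hypothetical.

```go
package example

import (
	"testing"
	"time"

	"github.com/hashicorp/nomad/e2e/v3/jobs3"
	"github.com/shoenig/test/must"
)

func testEnvVerbose(t *testing.T) {
	// Register ./input/env.hcl (with a randomized job ID), wait for the
	// "group" allocations to reach the complete state, and deregister the
	// job (plus a system gc) when the test finishes.
	job, cleanup := jobs3.Submit(t, "./input/env.hcl",
		jobs3.WaitComplete("group"),
		jobs3.Timeout(30*time.Second), // default is 20 seconds
		jobs3.Verbose(true),           // same effect as NOMAD_E2E_VERBOSE=1
	)
	t.Cleanup(cleanup)

	// Read stdout/stderr of task "task" from a random allocation of "group".
	logs := job.TaskLogs("group", "task")
	must.StrContains(t, logs.Stdout, "NOMAD_JOB_ID="+job.JobID())
}
```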

e2e/v3/namespaces3/namespaces3.go (new file)

@@ -0,0 +1,166 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package namespaces3
import (
"testing"
"time"
"github.com/hashicorp/go-set"
nomadapi "github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/e2e/v3/util3"
"github.com/hashicorp/nomad/helper"
"github.com/shoenig/test"
"github.com/shoenig/test/must"
)
type Names struct {
t *testing.T
nomadClient *nomadapi.Client
noCleanup bool
timeout time.Duration
verbose bool
apply *set.HashSet[*Namespace, string]
remove *set.Set[string]
}
func (g *Names) logf(msg string, args ...any) {
util3.Log3(g.t, g.verbose, msg, args...)
}
func (g *Names) cleanup() {
if g.noCleanup {
return
}
namespaceAPI := g.nomadClient.Namespaces()
// remove any namespaces we created (or updated)
for _, namespace := range g.apply.Slice() {
name := namespace.Name
g.logf("cleanup namespace %q", name)
_, err := namespaceAPI.Delete(name, nil)
test.NoError(g.t, err, test.Sprintf("unable to delete namespace %q", name))
}
}
type Option func(*Names)
type Cleanup func()
type Namespace struct {
Name string
Description string
}
func (ns *Namespace) Hash() string {
return ns.Name
}
func (ns *Namespace) String() string {
return ns.Name
}
func (n *Names) setClient() {
nomadClient, nomadErr := nomadapi.NewClient(nomadapi.DefaultConfig())
must.NoError(n.t, nomadErr, must.Sprint("failed to create nomad api client"))
n.nomadClient = nomadClient
}
func configure(t *testing.T, opts ...Option) Cleanup {
g := &Names{
t: t,
timeout: 10 * time.Second,
apply: set.NewHashSet[*Namespace, string](3),
remove: set.New[string](3),
}
for _, opt := range opts {
opt(g)
}
g.setClient()
g.run()
return g.cleanup
}
func (g *Names) run() {
namespacesAPI := g.nomadClient.Namespaces()
// do deletions
for _, namespace := range g.remove.Slice() {
g.logf("delete namespace %q", namespace)
_, err := namespacesAPI.Delete(namespace, nil)
must.NoError(g.t, err)
}
// do applies
for _, namespace := range g.apply.Slice() {
g.logf("apply namespace %q", namespace)
_, err := namespacesAPI.Register(&nomadapi.Namespace{
Name: namespace.Name,
Description: namespace.Description,
}, nil)
must.NoError(g.t, err)
}
}
// Create a namespace of the given name.
func Create(t *testing.T, name string, opts ...Option) Cleanup {
namespace := &Namespace{Name: name}
opt := apply(namespace)
return configure(t, append(opts, opt)...)
}
// Create namespaces of the given names.
func CreateN(t *testing.T, names []string, opts ...Option) Cleanup {
creations := helper.ConvertSlice(names, func(name string) Option {
namespace := &Namespace{Name: name}
return apply(namespace)
})
return configure(t, append(opts, creations...)...)
}
// Delete the namespace of the given name.
func Delete(t *testing.T, name string, opts ...Option) Cleanup {
opt := remove(name)
return configure(t, append(opts, opt)...)
}
func apply(namespace *Namespace) Option {
return func(g *Names) {
g.apply.Insert(namespace)
}
}
func remove(name string) Option {
return func(g *Names) {
g.remove.Insert(name)
}
}
// DisableCleanup will disable the automatic removal of any namespaces
// created using the Create() Option.
func DisableCleanup() Option {
return func(n *Names) {
n.noCleanup = true
}
}
func Timeout(timeout time.Duration) Option {
return func(n *Names) {
n.timeout = timeout
}
}
// Verbose will enable verbose logging.
func Verbose(on bool) Option {
return func(n *Names) {
n.verbose = on
}
}
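
As a usage sketch (for illustration, not part of the diff); the namespace names
here are made up. `Create`, `CreateN`, and `Delete` each return a `Cleanup`
that can be handed to `t.Cleanup`.

```go
package example

import (
	"testing"

	"github.com/hashicorp/nomad/e2e/v3/namespaces3"
)

func testNamespaceSetup(t *testing.T) {
	// Create one namespace; the returned Cleanup deletes it again.
	t.Cleanup(namespaces3.Create(t, "apps"))

	// Create a batch of namespaces but leave them in place afterwards:
	// with DisableCleanup the returned Cleanup is a no-op.
	keep := namespaces3.CreateN(t, []string{"dev", "qa"},
		namespaces3.DisableCleanup())
	t.Cleanup(keep)
}
```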

e2e/v3/util3/util3.go (new file)

@@ -0,0 +1,27 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package util3
import (
"fmt"
"math/rand"
"os"
"testing"
)
func ShortID(prefix string) string {
num := rand.Int31() % 1000
return fmt.Sprintf("%s-%03d", prefix, num)
}
// Log3 is a helper for verbose logging in e2e/v3 packages.
//
// Do not call this directly from tests.
func Log3(t *testing.T, verbose bool, msg string, args ...any) {
env := os.Getenv("NOMAD_E2E_VERBOSE")
on := verbose || env == "1" || env == "true"
if on {
t.Logf(msg, args...)
}
}
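
A small sketch of how these helpers are meant to be consumed by other v3
packages rather than by tests directly (per the `Log3` comment). The `helper`
type and `sketch3` package are hypothetical, mirroring the `logf` methods in
`jobs3` and `namespaces3`.

```go
package sketch3 // hypothetical package, for illustration only

import (
	"testing"

	"github.com/hashicorp/nomad/e2e/v3/util3"
)

type helper struct {
	t       *testing.T
	verbose bool
}

// logf emits output when the Verbose option was set on this helper, or when
// NOMAD_E2E_VERBOSE=1 (or "true") is set in the environment.
func (h *helper) logf(msg string, args ...any) {
	util3.Log3(h.t, h.verbose, msg, args...)
}

// shortID generates IDs like "example-291", as seen in the verbose output
// at the top of this PR.
func shortID() string {
	return util3.ShortID("example")
}
```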