e2e: add test for multiple service sidecars in the same alloc
e2e/connect/input/multi-service.nomad (new file, 50 lines)
@@ -0,0 +1,50 @@
job "multi-service" {
  datacenters = ["dc1"]

  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  group "test" {
    network {
      mode = "bridge"
    }

    service {
      name = "echo1"
      port = "9001"

      connect {
        sidecar_service {}
      }
    }

    task "echo1" {
      driver = "docker"

      config {
        image = "hashicorp/http-echo"
        args  = ["-listen=:9001", "-text=echo1"]
      }
    }

    service {
      name = "echo2"
      port = "9002"

      connect {
        sidecar_service {}
      }
    }

    task "echo2" {
      driver = "docker"

      config {
        image = "hashicorp/http-echo"
        args  = ["-listen=:9002", "-text=echo2"]
      }
    }
  }
}
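Once this job is placed, Nomad registers each group service plus an Envoy sidecar proxy with the local Consul agent. As a minimal sketch, not part of this commit, here is how one might inspect those registrations by hand with the Consul API client, assuming a Consul agent reachable at its default local address; the expectation that sidecars appear with a "-sidecar-proxy" suffix follows Consul Connect's default naming convention:

    package main

    import (
        "fmt"
        "log"

        consulapi "github.com/hashicorp/consul/api"
    )

    func main() {
        // Connect to the local Consul agent (default http://127.0.0.1:8500).
        client, err := consulapi.NewClient(consulapi.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        // List all services registered with this agent. For the job above we
        // would expect echo1, echo2, and one "-sidecar-proxy" entry for each.
        services, err := client.Agent().Services()
        if err != nil {
            log.Fatal(err)
        }
        for id, svc := range services {
            fmt.Printf("%s -> %s (port %d)\n", id, svc.Service, svc.Port)
        }
    }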
e2e/connect/multi_service.go (new file, 133 lines)
@@ -0,0 +1,133 @@
package connect

import (
    "strings"
    "time"

    consulapi "github.com/hashicorp/consul/api"
    "github.com/hashicorp/nomad/api"
    "github.com/hashicorp/nomad/e2e/framework"
    "github.com/hashicorp/nomad/helper/uuid"
    "github.com/hashicorp/nomad/jobspec"
    "github.com/kr/pretty"
    "github.com/stretchr/testify/require"
)

// TestMultiServiceConnect tests running multiple envoy sidecars in the same allocation.
func (tc *ConnectE2ETest) TestMultiServiceConnect(f *framework.F) {
    t := f.T()
    uuid := uuid.Generate()
    jobID := "connect" + uuid[0:8]
    tc.jobIds = append(tc.jobIds, jobID)
    jobapi := tc.Nomad().Jobs()

    job, err := jobspec.ParseFile("connect/input/multi-service.nomad")
    require.NoError(t, err)
    job.ID = &jobID

    resp, _, err := jobapi.Register(job, nil)
    require.NoError(t, err)
    require.NotNil(t, resp)
    require.Zero(t, resp.Warnings)

EVAL:
    qopts := &api.QueryOptions{
        WaitIndex: resp.EvalCreateIndex,
    }
    evalapi := tc.Nomad().Evaluations()
    eval, qmeta, err := evalapi.Info(resp.EvalID, qopts)
    require.NoError(t, err)
    qopts.WaitIndex = qmeta.LastIndex

    switch eval.Status {
    case "pending":
        goto EVAL
    case "complete":
        // Ok!
    case "failed", "canceled", "blocked":
        t.Fatalf("eval %s\n%s\n", eval.Status, pretty.Sprint(eval))
    default:
        t.Fatalf("unknown eval status: %s\n%s\n", eval.Status, pretty.Sprint(eval))
    }

    // Assert there were 0 placement failures
    require.Zero(t, eval.FailedTGAllocs, pretty.Sprint(eval.FailedTGAllocs))
    require.Len(t, eval.QueuedAllocations, 1, pretty.Sprint(eval.QueuedAllocations))

    // Assert allocs are running
    require.Eventually(t, func() bool {
        allocs, qmeta, err := evalapi.Allocations(eval.ID, qopts)
        require.NoError(t, err)
        require.Len(t, allocs, 1)
        qopts.WaitIndex = qmeta.LastIndex

        running := 0
        for _, alloc := range allocs {
            switch alloc.ClientStatus {
            case "running":
                running++
            case "pending":
                // keep trying
            default:
                t.Fatalf("alloc failed: %s", pretty.Sprint(alloc))
            }
        }

        if running == len(allocs) {
            return true
        }
        return false
    }, 10*time.Second, 500*time.Millisecond)

    allocs, _, err := evalapi.Allocations(eval.ID, qopts)
    require.NoError(t, err)
    allocIDs := make(map[string]bool, 1)
    for _, a := range allocs {
        if a.ClientStatus != "running" || a.DesiredStatus != "run" {
            t.Fatalf("alloc %s (%s) terminal; client=%s desired=%s", a.TaskGroup, a.ID, a.ClientStatus, a.DesiredStatus)
        }
        allocIDs[a.ID] = true
    }

    // Check Consul service health
    agentapi := tc.Consul().Agent()

    failing := map[string]*consulapi.AgentCheck{}
    require.Eventually(t, func() bool {
        checks, err := agentapi.Checks()
        require.NoError(t, err)

        // Filter out checks for other services
        for cid, check := range checks {
            found := false
            for allocID := range allocIDs {
                if strings.Contains(check.ServiceID, allocID) {
                    found = true
                    break
                }
            }

            if !found {
                delete(checks, cid)
            }
        }

        // Ensure checks are all passing
        failing = map[string]*consulapi.AgentCheck{}
        for _, check := range checks {
            if check.Status != "passing" {
                failing[check.CheckID] = check
                break
            }
        }

        if len(failing) == 0 {
            return true
        }

        t.Logf("still %d checks not passing", len(failing))
        return false
    }, time.Minute, time.Second)

    require.Len(t, failing, 0, pretty.Sprint(failing))
}
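The EVAL label above implements Nomad's blocking-query pattern: re-request the evaluation with the last-seen index until it leaves the "pending" state, letting the server hold the request instead of the client busy-looping. A standalone sketch of that loop, not part of this commit and with the evaluation ID as a placeholder, assuming a reachable Nomad agent:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/nomad/api"
    )

    // waitForEval polls an evaluation until it is no longer pending, carrying
    // the query's LastIndex forward so each request after the first blocks
    // server-side until the evaluation changes.
    func waitForEval(evals *api.Evaluations, evalID string) (*api.Evaluation, error) {
        qopts := &api.QueryOptions{}
        for {
            eval, qmeta, err := evals.Info(evalID, qopts)
            if err != nil {
                return nil, err
            }
            if eval.Status != "pending" {
                return eval, nil
            }
            qopts.WaitIndex = qmeta.LastIndex
        }
    }

    func main() {
        // Default config honors NOMAD_ADDR, falling back to http://127.0.0.1:4646.
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }
        // "example-eval-id" is a placeholder for a real evaluation ID.
        eval, err := waitForEval(client.Evaluations(), "example-eval-id")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("eval finished with status:", eval.Status)
    }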