e2e: add nsd simple load balancing test

Seth Hoenig
2022-07-14 15:04:22 -05:00
parent b7cfbd8e9e
commit 2d83f130fe
4 changed files with 238 additions and 11 deletions

View File

@@ -0,0 +1,55 @@
job "simple_lb_clients" {
datacenters = ["dc1"]
type = "batch"
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
group "client_1" {
task "cat" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "sleep 15000"]
}
resources {
cpu = 10
memory = 16
}
template {
destination = "output.txt"
data = <<EOH
{{$allocID := env "NOMAD_ALLOC_ID" -}}
{{range nomadService 1 $allocID "db"}}
server {{ .Address }}:{{ .Port }}
{{- end}}
EOH
}
}
}
group "client_2" {
task "cat" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "sleep 15000"]
}
resources {
cpu = 10
memory = 16
}
template {
destination = "output.txt"
data = <<EOH
{{$allocID := env "NOMAD_ALLOC_ID" -}}
{{range nomadService 2 $allocID "db"}}
server {{ .Address }}:{{ .Port }}
{{- end}}
EOH
}
}
}
}
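The two client groups are identical except for the count passed to nomadService. In its three-argument form (nomadService count key service), the template function performs simple load balancing: instead of rendering every registration of "db", it renders a deterministic subset of that size, keyed here by the allocation ID. A minimal sketch, not part of this commit, of parsing what each client's rendered output.txt contains; the helper name and example addresses are illustrative:

package main

import (
    "bufio"
    "fmt"
    "strings"
)

// selectedBackends extracts the "server <address>:<port>" lines that the
// template above renders into output.txt. client_1 should yield one entry,
// client_2 two, out of the three registered db replicas.
func selectedBackends(rendered string) []string {
    var backends []string
    sc := bufio.NewScanner(strings.NewReader(rendered))
    for sc.Scan() {
        line := strings.TrimSpace(sc.Text())
        if strings.HasPrefix(line, "server ") {
            backends = append(backends, strings.TrimPrefix(line, "server "))
        }
    }
    return backends
}

func main() {
    // Example content only; real addresses and ports come from the bridge
    // network's dynamically allocated "db_port".
    rendered := "server 10.0.0.5:25467\nserver 10.0.0.6:29032\n"
    fmt.Println(selectedBackends(rendered)) // [10.0.0.5:25467 10.0.0.6:29032]
}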

View File

@@ -0,0 +1,81 @@
job "simple_lb_replicas" {
datacenters = ["dc1"]
type = "service"
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
group "db_replica_1" {
network {
mode = "bridge"
port "db_port" {}
}
service {
name = "db"
tags = ["r1"]
port = "db_port"
provider = "nomad"
}
task "db" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "sleep 15000"]
}
resources {
cpu = 10
memory = 16
}
}
}
group "db_replica_2" {
network {
mode = "bridge"
port "db_port" {}
}
service {
name = "db"
tags = ["r2"]
port = "db_port"
provider = "nomad"
}
task "db" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "sleep 15000"]
}
resources {
cpu = 10
memory = 16
}
}
}
group "db_replica_3" {
network {
mode = "bridge"
port "db_port" {}
}
service {
name = "db"
tags = ["r3"]
port = "db_port"
provider = "nomad"
}
task "db" {
driver = "raw_exec"
config {
command = "bash"
args = ["-c", "sleep 15000"]
}
resources {
cpu = 10
memory = 16
}
}
}
}
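All three replica groups register the same Nomad-native service, "db", each on a dynamically allocated bridge-network port and with a unique tag (r1, r2, r3) so individual replicas can be told apart later. A rough sketch, not part of this commit, of listing those registrations through the Nomad Go API once the job is placed; it assumes a cluster reachable via the default client configuration:

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/nomad/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    // One lookup by service name returns every replica; the r1/r2/r3 tags
    // are the only thing distinguishing the three registrations.
    regs, _, err := client.Services().Get("db", nil)
    if err != nil {
        log.Fatal(err)
    }
    for _, reg := range regs {
        fmt.Printf("%s:%d tags=%v alloc=%s\n", reg.Address, reg.Port, reg.Tags, reg.AllocID)
    }
}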

View File

@@ -11,12 +11,15 @@ import (
     "github.com/hashicorp/nomad/e2e/e2eutil"
     "github.com/hashicorp/nomad/helper/uuid"
     "github.com/stretchr/testify/require"
+    "golang.org/x/exp/slices"
 )
 
 const (
-    jobNomadProvider  = "./input/nomad_provider.nomad"
-    jobConsulProvider = "./input/consul_provider.nomad"
-    jobMultiProvider  = "./input/multi_provider.nomad"
+    jobNomadProvider    = "./input/nomad_provider.nomad"
+    jobConsulProvider   = "./input/consul_provider.nomad"
+    jobMultiProvider    = "./input/multi_provider.nomad"
+    jobSimpleLBReplicas = "./input/simple_lb_replicas.nomad"
+    jobSimpleLBClients  = "./input/simple_lb_clients.nomad"
 )
 
 const (
@@ -37,6 +40,7 @@ func TestServiceDiscovery(t *testing.T) {
     // Run our test cases.
     t.Run("TestServiceDiscovery_MultiProvider", testMultiProvider)
     t.Run("TestServiceDiscovery_UpdateProvider", testUpdateProvider)
+    t.Run("TestServiceDiscovery_SimpleLoadBalancing", testSimpleLoadBalancing)
 }
 
 // testMultiProvider tests service discovery where multi providers are used
@@ -47,7 +51,7 @@ func testMultiProvider(t *testing.T) {
     consulClient := e2eutil.ConsulClient(t)
 
     // Generate our job ID which will be used for the entire test.
-    jobID := "service-discovery-multi-provider-" + uuid.Generate()[:8]
+    jobID := "service-discovery-multi-provider-" + uuid.Short()
     jobIDs := []string{jobID}
 
     // Defer a cleanup function to remove the job. This will trigger if the
@@ -92,7 +96,7 @@ func testMultiProvider(t *testing.T) {
         AllocID:     nomadProviderAllocID,
         Tags:        []string{"foo", "bar"},
     }
-    requireEventuallyNomadService(t, &expectedNomadService)
+    requireEventuallyNomadService(t, &expectedNomadService, "")
 
     // Lookup the service registration in Consul and assert this matches what
     // we expected.
@@ -205,7 +209,7 @@ func testUpdateProvider(t *testing.T) {
     const serviceName = "http-api"
 
     // Generate our job ID which will be used for the entire test.
-    jobID := "service-discovery-update-provider-" + uuid.Generate()[:8]
+    jobID := "service-discovery-update-provider-" + uuid.Short()
     jobIDs := []string{jobID}
 
     // Defer a cleanup function to remove the job. This will trigger if the
@@ -241,7 +245,7 @@ func testUpdateProvider(t *testing.T) {
             AllocID:     nomadProviderAllocID,
             Tags:        []string{"foo", "bar"},
         }
-        requireEventuallyNomadService(t, &expectedNomadService)
+        requireEventuallyNomadService(t, &expectedNomadService, "")
     }
 
     nomadServiceTestFn()
@@ -320,17 +324,25 @@ func testUpdateProvider(t *testing.T) {
 // against Nomad for a single service. Test cases which expect more than a
 // single response should implement their own assertion, to handle ordering
 // problems.
-func requireEventuallyNomadService(t *testing.T, expected *api.ServiceRegistration) {
+func requireEventuallyNomadService(t *testing.T, expected *api.ServiceRegistration, filter string) {
+    opts := (*api.QueryOptions)(nil)
+    if filter != "" {
+        opts = &api.QueryOptions{
+            Filter: filter,
+        }
+    }
+
     require.Eventually(t, func() bool {
-        services, _, err := e2eutil.NomadClient(t).Services().Get(expected.ServiceName, nil)
+        services, _, err := e2eutil.NomadClient(t).Services().Get(expected.ServiceName, opts)
         if err != nil {
             return false
         }
 
         // Perform the checks.
         if len(services) != 1 {
             return false
         }
 
+        // ensure each matching service meets expectations
         if services[0].ServiceName != expected.ServiceName {
             return false
         }
@@ -346,6 +358,11 @@ func requireEventuallyNomadService(t *testing.T, expected *api.ServiceRegistrati
         if services[0].AllocID != expected.AllocID {
             return false
         }
-        return reflect.DeepEqual(services[0].Tags, expected.Tags)
+
+        if !slices.Equal(services[0].Tags, expected.Tags) {
+            return false
+        }
+
+        return true
     }, defaultWaitForTime, defaultTickTime)
 }
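Because the replicas job registers "db" three times, an unfiltered lookup would trip the helper's single-result check; the new filter argument is passed through api.QueryOptions.Filter so callers can narrow the query server-side. A hypothetical wrapper, not part of this commit, showing the intended call pattern (the function name is illustrative):

package servicediscovery

import (
    "fmt"
    "testing"

    "github.com/hashicorp/nomad/api"
)

// requireReplicaRegistered is a hypothetical convenience wrapper around the
// widened helper: it asserts that exactly one "db" registration carrying the
// given replica tag (r1, r2, or r3) exists for the placed allocation.
func requireReplicaRegistered(t *testing.T, jobID, allocID, tag string) {
    expected := api.ServiceRegistration{
        ServiceName: "db",
        Namespace:   api.DefaultNamespace,
        Datacenter:  "dc1",
        JobID:       jobID,
        AllocID:     allocID,
        Tags:        []string{tag},
    }
    // The filter excludes the other two replicas, so the helper's
    // len(services) == 1 check still holds.
    requireEventuallyNomadService(t, &expected, fmt.Sprintf("Tags contains %q", tag))
}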

View File

@@ -0,0 +1,74 @@
package servicediscovery

import (
    "context"
    "fmt"
    "strings"
    "testing"

    "github.com/hashicorp/nomad/api"
    "github.com/hashicorp/nomad/e2e/e2eutil"
    "github.com/hashicorp/nomad/helper/uuid"
    "github.com/shoenig/test/must"
)

func testSimpleLoadBalancing(t *testing.T) {
    nomadClient := e2eutil.NomadClient(t)

    // Generate our job ID which will be used for the entire test.
    jobID := "nsd-simple-lb-replicas" + uuid.Short()
    jobIDs := []string{jobID}

    // Defer a cleanup function to remove the job. This will trigger if the
    // test fails, unless the cancel function is called.
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    defer e2eutil.CleanupJobsAndGCWithContext(t, ctx, &jobIDs)

    // Register the replicas job.
    allocStubs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient, jobSimpleLBReplicas, jobID, "")
    must.Len(t, 3, allocStubs)

    for _, stub := range allocStubs {
        var tag string
        switch stub.TaskGroup {
        case "db_replica_1":
            tag = "r1"
        case "db_replica_2":
            tag = "r2"
        case "db_replica_3":
            tag = "r3"
        }
        expectService := api.ServiceRegistration{
            ServiceName: "db",
            Namespace:   api.DefaultNamespace,
            Datacenter:  "dc1",
            JobID:       jobID,
            AllocID:     stub.ID,
            Tags:        []string{tag},
        }
        filter := fmt.Sprintf("Tags contains %q", tag)
        requireEventuallyNomadService(t, &expectService, filter)
    }

    jobID2 := "nsd-simple-lb-clients" + uuid.Short()
    jobIDs = append(jobIDs, jobID2)

    // Register the clients job.
    allocStubs = e2eutil.RegisterAndWaitForAllocs(t, nomadClient, jobSimpleLBClients, jobID2, "")
    must.Len(t, 2, allocStubs)

    for _, stub := range allocStubs {
        var expCount int
        switch stub.TaskGroup {
        case "client_1":
            expCount = 1
        case "client_2":
            expCount = 2
        }
        must.NoError(t, e2eutil.WaitForAllocFile(stub.ID, "cat/output.txt", func(content string) bool {
            count := strings.Count(content, "server ")
            return count == expCount
        }, nil))
    }
}
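The predicate handed to e2eutil.WaitForAllocFile above reduces to a line count: client_1 asked nomadService for one backend and client_2 for two, so their rendered output.txt files should contain one and two "server " lines respectively. A small sketch spelling that out; the helper and variable names are illustrative and not part of the commit:

package servicediscovery

import "strings"

// expectedServerCount mirrors the count argument each client group passes to
// nomadService in simple_lb_clients.nomad.
var expectedServerCount = map[string]int{
    "client_1": 1,
    "client_2": 2,
}

// hasServerLines builds a predicate suitable for e2eutil.WaitForAllocFile
// that passes once the rendered template contains exactly n "server " lines.
func hasServerLines(n int) func(string) bool {
    return func(content string) bool {
        return strings.Count(content, "server ") == n
    }
}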