diff --git a/CHANGELOG.md b/CHANGELOG.md
index 10e26b07f..6bb341fcb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,9 @@
## 1.1.0 (Unreleased)
FEATURES:
- * core (Enterprise): Support loading Enterprise license from disk or environment. [[GH-10216](https://github.com/hashicorp/nomad/issues/10216)]
- * deployments: Adds `service` and `check` `on_update` configuration to support liveness and readiness checks. [[GH-9955](https://github.com/hashicorp/nomad/issues/9955)]
+ * **Consul Namespaces**: Adds support for Consul Namespaces [[GH-10235](https://github.com/hashicorp/nomad/pull/10235)]
+ * **Licensing (Enterprise)**: Support loading Enterprise license from disk or environment. [[GH-10216](https://github.com/hashicorp/nomad/issues/10216)]
+ * **Readiness Checks**: Adds `service` and `check` `on_update` configuration to support liveness and readiness checks. [[GH-9955](https://github.com/hashicorp/nomad/issues/9955)]
IMPROVEMENTS:
* api: Removed unimplemented `CSIVolumes.PluginList` API. [[GH-10158](https://github.com/hashicorp/nomad/issues/10158)]
diff --git a/api/consul.go b/api/consul.go
new file mode 100644
index 000000000..64e085e61
--- /dev/null
+++ b/api/consul.go
@@ -0,0 +1,35 @@
+package api
+
+// Consul represents configuration related to consul.
+type Consul struct {
+ // (Enterprise-only) Namespace represents a Consul namespace.
+ Namespace string `mapstructure:"namespace" hcl:"namespace,optional"`
+}
+
+// Canonicalize Consul into a canonical form. Structs containing a Consul
+// should ensure it is not nil before calling Canonicalize.
+func (c *Consul) Canonicalize() {
+ // Nothing to do here.
+ //
+ // If Namespace is nil, that is a choice of the job submitter that
+ // we should inherit from higher up (i.e. job<-group). Likewise, if
+ // Namespace is set but empty, that is a choice to use the default consul
+ // namespace.
+}
+
+// Copy creates a deep copy of c.
+func (c *Consul) Copy() *Consul {
+ return &Consul{
+ Namespace: c.Namespace,
+ }
+}
+
+// MergeNamespace sets Namespace to namespace if not already configured.
+// This is used to inherit the job-level consul_namespace if the group-level
+// namespace is not explicitly configured.
+func (c *Consul) MergeNamespace(namespace *string) {
+ // only inherit namespace from above if not already set
+ if c.Namespace == "" && namespace != nil {
+ c.Namespace = *namespace
+ }
+}
diff --git a/api/consul_test.go b/api/consul_test.go
new file mode 100644
index 000000000..2b1145fe9
--- /dev/null
+++ b/api/consul_test.go
@@ -0,0 +1,58 @@
+package api
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestConsul_Canonicalize(t *testing.T) {
+ t.Run("missing ns", func(t *testing.T) {
+ c := new(Consul)
+ c.Canonicalize()
+ require.Empty(t, c.Namespace)
+ })
+
+ t.Run("complete", func(t *testing.T) {
+ c := &Consul{Namespace: "foo"}
+ c.Canonicalize()
+ require.Equal(t, "foo", c.Namespace)
+ })
+}
+
+func TestConsul_Copy(t *testing.T) {
+ t.Run("complete", func(t *testing.T) {
+ result := (&Consul{
+ Namespace: "foo",
+ }).Copy()
+ require.Equal(t, &Consul{
+ Namespace: "foo",
+ }, result)
+ })
+}
+
+func TestConsul_MergeNamespace(t *testing.T) {
+ t.Run("already set", func(t *testing.T) {
+ a := &Consul{Namespace: "foo"}
+ ns := stringToPtr("bar")
+ a.MergeNamespace(ns)
+ require.Equal(t, "foo", a.Namespace)
+ require.Equal(t, "bar", *ns)
+ })
+
+ t.Run("inherit", func(t *testing.T) {
+ a := &Consul{Namespace: ""}
+ ns := stringToPtr("bar")
+ a.MergeNamespace(ns)
+ require.Equal(t, "bar", a.Namespace)
+ require.Equal(t, "bar", *ns)
+ })
+
+ t.Run("parent is nil", func(t *testing.T) {
+ a := &Consul{Namespace: "foo"}
+ ns := (*string)(nil)
+ a.MergeNamespace(ns)
+ require.Equal(t, "foo", a.Namespace)
+ require.Nil(t, ns)
+ })
+}
diff --git a/api/jobs.go b/api/jobs.go
index d65544519..1fed52c7c 100644
--- a/api/jobs.go
+++ b/api/jobs.go
@@ -817,6 +817,7 @@ type Job struct {
ParentID *string
Dispatched bool
Payload []byte
+ ConsulNamespace *string `mapstructure:"consul_namespace"`
VaultNamespace *string `mapstructure:"vault_namespace"`
NomadTokenID *string `mapstructure:"nomad_token_id"`
Status *string
@@ -878,6 +879,9 @@ func (j *Job) Canonicalize() {
if j.ConsulToken == nil {
j.ConsulToken = stringToPtr("")
}
+ if j.ConsulNamespace == nil {
+ j.ConsulNamespace = stringToPtr("")
+ }
if j.VaultToken == nil {
j.VaultToken = stringToPtr("")
}
diff --git a/api/jobs_test.go b/api/jobs_test.go
index 0453f033a..250f33717 100644
--- a/api/jobs_test.go
+++ b/api/jobs_test.go
@@ -243,6 +243,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Priority: intToPtr(50),
AllAtOnce: boolToPtr(false),
ConsulToken: stringToPtr(""),
+ ConsulNamespace: stringToPtr(""),
VaultToken: stringToPtr(""),
VaultNamespace: stringToPtr(""),
NomadTokenID: stringToPtr(""),
@@ -288,6 +289,9 @@ func TestJobs_Canonicalize(t *testing.T) {
MaxDelay: timeToPtr(1 * time.Hour),
Unlimited: boolToPtr(true),
},
+ Consul: &Consul{
+ Namespace: "",
+ },
Update: &UpdateStrategy{
Stagger: timeToPtr(30 * time.Second),
MaxParallel: intToPtr(1),
@@ -334,6 +338,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Priority: intToPtr(50),
AllAtOnce: boolToPtr(false),
ConsulToken: stringToPtr(""),
+ ConsulNamespace: stringToPtr(""),
VaultToken: stringToPtr(""),
VaultNamespace: stringToPtr(""),
NomadTokenID: stringToPtr(""),
@@ -368,6 +373,9 @@ func TestJobs_Canonicalize(t *testing.T) {
MaxDelay: timeToPtr(0),
Unlimited: boolToPtr(false),
},
+ Consul: &Consul{
+ Namespace: "",
+ },
Tasks: []*Task{
{
KillTimeout: timeToPtr(5 * time.Second),
@@ -408,6 +416,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Priority: intToPtr(50),
AllAtOnce: boolToPtr(false),
ConsulToken: stringToPtr(""),
+ ConsulNamespace: stringToPtr(""),
VaultToken: stringToPtr(""),
VaultNamespace: stringToPtr(""),
NomadTokenID: stringToPtr(""),
@@ -453,6 +462,9 @@ func TestJobs_Canonicalize(t *testing.T) {
MaxDelay: timeToPtr(1 * time.Hour),
Unlimited: boolToPtr(true),
},
+ Consul: &Consul{
+ Namespace: "",
+ },
Update: &UpdateStrategy{
Stagger: timeToPtr(30 * time.Second),
MaxParallel: intToPtr(1),
@@ -575,6 +587,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Type: stringToPtr("service"),
AllAtOnce: boolToPtr(false),
ConsulToken: stringToPtr(""),
+ ConsulNamespace: stringToPtr(""),
VaultToken: stringToPtr(""),
VaultNamespace: stringToPtr(""),
NomadTokenID: stringToPtr(""),
@@ -621,7 +634,9 @@ func TestJobs_Canonicalize(t *testing.T) {
Migrate: boolToPtr(false),
SizeMB: intToPtr(300),
},
-
+ Consul: &Consul{
+ Namespace: "",
+ },
Update: &UpdateStrategy{
Stagger: timeToPtr(30 * time.Second),
MaxParallel: intToPtr(1),
@@ -737,6 +752,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Priority: intToPtr(50),
AllAtOnce: boolToPtr(false),
ConsulToken: stringToPtr(""),
+ ConsulNamespace: stringToPtr(""),
VaultToken: stringToPtr(""),
VaultNamespace: stringToPtr(""),
NomadTokenID: stringToPtr(""),
@@ -789,6 +805,9 @@ func TestJobs_Canonicalize(t *testing.T) {
TaskGroups: []*TaskGroup{
{
Name: stringToPtr("bar"),
+ Consul: &Consul{
+ Namespace: "",
+ },
Update: &UpdateStrategy{
Stagger: timeToPtr(2 * time.Second),
MaxParallel: intToPtr(2),
@@ -824,6 +843,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Priority: intToPtr(50),
AllAtOnce: boolToPtr(false),
ConsulToken: stringToPtr(""),
+ ConsulNamespace: stringToPtr(""),
VaultToken: stringToPtr(""),
VaultNamespace: stringToPtr(""),
NomadTokenID: stringToPtr(""),
@@ -869,6 +889,9 @@ func TestJobs_Canonicalize(t *testing.T) {
MaxDelay: timeToPtr(1 * time.Hour),
Unlimited: boolToPtr(true),
},
+ Consul: &Consul{
+ Namespace: "",
+ },
Update: &UpdateStrategy{
Stagger: timeToPtr(2 * time.Second),
MaxParallel: intToPtr(2),
@@ -913,6 +936,9 @@ func TestJobs_Canonicalize(t *testing.T) {
MaxDelay: timeToPtr(1 * time.Hour),
Unlimited: boolToPtr(true),
},
+ Consul: &Consul{
+ Namespace: "",
+ },
Update: &UpdateStrategy{
Stagger: timeToPtr(1 * time.Second),
MaxParallel: intToPtr(1),
@@ -972,6 +998,9 @@ func TestJobs_Canonicalize(t *testing.T) {
Interval: timeToPtr(30 * time.Minute),
Mode: stringToPtr("fail"),
},
+ Consul: &Consul{
+ Namespace: "",
+ },
Tasks: []*Task{
{
Name: "task1",
@@ -990,6 +1019,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Priority: intToPtr(50),
AllAtOnce: boolToPtr(false),
ConsulToken: stringToPtr(""),
+ ConsulNamespace: stringToPtr(""),
VaultToken: stringToPtr(""),
VaultNamespace: stringToPtr(""),
NomadTokenID: stringToPtr(""),
@@ -1035,6 +1065,9 @@ func TestJobs_Canonicalize(t *testing.T) {
MaxDelay: timeToPtr(1 * time.Hour),
Unlimited: boolToPtr(true),
},
+ Consul: &Consul{
+ Namespace: "",
+ },
Update: &UpdateStrategy{
Stagger: timeToPtr(30 * time.Second),
MaxParallel: intToPtr(1),
@@ -1084,6 +1117,9 @@ func TestJobs_Canonicalize(t *testing.T) {
MaxDelay: timeToPtr(1 * time.Hour),
Unlimited: boolToPtr(true),
},
+ Consul: &Consul{
+ Namespace: "",
+ },
Update: &UpdateStrategy{
Stagger: timeToPtr(30 * time.Second),
MaxParallel: intToPtr(1),
@@ -1154,6 +1190,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Priority: intToPtr(50),
AllAtOnce: boolToPtr(false),
ConsulToken: stringToPtr(""),
+ ConsulNamespace: stringToPtr(""),
VaultToken: stringToPtr(""),
VaultNamespace: stringToPtr(""),
NomadTokenID: stringToPtr(""),
diff --git a/api/tasks.go b/api/tasks.go
index b5c6ba119..9757518b9 100644
--- a/api/tasks.go
+++ b/api/tasks.go
@@ -430,6 +430,7 @@ type TaskGroup struct {
ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
StopAfterClientDisconnect *time.Duration `mapstructure:"stop_after_client_disconnect" hcl:"stop_after_client_disconnect,optional"`
Scaling *ScalingPolicy `hcl:"scaling,block"`
+ Consul *Consul `hcl:"consul,block"`
}
// NewTaskGroup creates a new TaskGroup.
@@ -462,6 +463,13 @@ func (g *TaskGroup) Canonicalize(job *Job) {
g.EphemeralDisk.Canonicalize()
}
+ // Merge job.consul onto group.consul
+ if g.Consul == nil {
+ g.Consul = new(Consul)
+ }
+ g.Consul.MergeNamespace(job.ConsulNamespace)
+ g.Consul.Canonicalize()
+
// Merge the update policy from the job
if ju, tu := job.Update != nil, g.Update != nil; ju && tu {
// Merge the jobs and task groups definition of the update strategy
diff --git a/api/tasks_test.go b/api/tasks_test.go
index 844972fe5..b922123f7 100644
--- a/api/tasks_test.go
+++ b/api/tasks_test.go
@@ -715,11 +715,10 @@ func TestSpread_Canonicalize(t *testing.T) {
for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) {
- require := require.New(t)
tg.Spreads = []*Spread{tc.spread}
tg.Canonicalize(job)
for _, spr := range tg.Spreads {
- require.Equal(tc.expectedWeight, *spr.Weight)
+ require.Equal(t, tc.expectedWeight, *spr.Weight)
}
})
}
@@ -788,3 +787,56 @@ func Test_NewDefaultReschedulePolicy(t *testing.T) {
})
}
}
+
+func TestTaskGroup_Canonicalize_Consul(t *testing.T) {
+ t.Run("override job consul in group", func(t *testing.T) {
+ job := &Job{
+ ID: stringToPtr("job"),
+ ConsulNamespace: stringToPtr("ns1"),
+ }
+ job.Canonicalize()
+
+ tg := &TaskGroup{
+ Name: stringToPtr("group"),
+ Consul: &Consul{Namespace: "ns2"},
+ }
+ tg.Canonicalize(job)
+
+ require.Equal(t, "ns1", *job.ConsulNamespace)
+ require.Equal(t, "ns2", tg.Consul.Namespace)
+ })
+
+ t.Run("inherit job consul in group", func(t *testing.T) {
+ job := &Job{
+ ID: stringToPtr("job"),
+ ConsulNamespace: stringToPtr("ns1"),
+ }
+ job.Canonicalize()
+
+ tg := &TaskGroup{
+ Name: stringToPtr("group"),
+ Consul: nil, // not set, inherit from job
+ }
+ tg.Canonicalize(job)
+
+ require.Equal(t, "ns1", *job.ConsulNamespace)
+ require.Equal(t, "ns1", tg.Consul.Namespace)
+ })
+
+ t.Run("set in group only", func(t *testing.T) {
+ job := &Job{
+ ID: stringToPtr("job"),
+ ConsulNamespace: nil,
+ }
+ job.Canonicalize()
+
+ tg := &TaskGroup{
+ Name: stringToPtr("group"),
+ Consul: &Consul{Namespace: "ns2"},
+ }
+ tg.Canonicalize(job)
+
+ require.Empty(t, job.ConsulNamespace)
+ require.Equal(t, "ns2", tg.Consul.Namespace)
+ })
+}
diff --git a/client/allocrunner/alloc_runner_hooks.go b/client/allocrunner/alloc_runner_hooks.go
index 2ad59febf..4a1b0571e 100644
--- a/client/allocrunner/alloc_runner_hooks.go
+++ b/client/allocrunner/alloc_runner_hooks.go
@@ -145,6 +145,7 @@ func (ar *allocRunner) initRunnerHooks(config *clientconfig.Config) error {
newGroupServiceHook(groupServiceHookConfig{
alloc: alloc,
consul: ar.consulClient,
+ consulNamespace: alloc.ConsulNamespace(),
restarter: ar,
taskEnvBuilder: taskenv.NewBuilder(config.Node, ar.Alloc(), nil, config.Region).SetAllocDir(ar.allocDir.AllocDir),
networkStatusGetter: ar,
diff --git a/client/allocrunner/groupservice_hook.go b/client/allocrunner/groupservice_hook.go
index 93b1457d4..065c94347 100644
--- a/client/allocrunner/groupservice_hook.go
+++ b/client/allocrunner/groupservice_hook.go
@@ -12,6 +12,10 @@ import (
"github.com/hashicorp/nomad/nomad/structs"
)
+const (
+ groupServiceHookName = "group_services"
+)
+
type networkStatusGetter interface {
NetworkStatus() *structs.AllocNetworkStatus
}
@@ -23,6 +27,7 @@ type groupServiceHook struct {
group string
restarter agentconsul.WorkloadRestarter
consulClient consul.ConsulServiceAPI
+ consulNamespace string
prerun bool
delay time.Duration
deregistered bool
@@ -45,6 +50,7 @@ type groupServiceHook struct {
type groupServiceHookConfig struct {
alloc *structs.Allocation
consul consul.ConsulServiceAPI
+ consulNamespace string
restarter agentconsul.WorkloadRestarter
taskEnvBuilder *taskenv.Builder
networkStatusGetter networkStatusGetter
@@ -64,12 +70,13 @@ func newGroupServiceHook(cfg groupServiceHookConfig) *groupServiceHook {
group: cfg.alloc.TaskGroup,
restarter: cfg.restarter,
consulClient: cfg.consul,
+ consulNamespace: cfg.consulNamespace,
taskEnvBuilder: cfg.taskEnvBuilder,
delay: shutdownDelay,
networkStatusGetter: cfg.networkStatusGetter,
+ logger: cfg.logger.Named(groupServiceHookName),
+ services: cfg.alloc.Job.LookupTaskGroup(cfg.alloc.TaskGroup).Services,
}
- h.logger = cfg.logger.Named(h.Name())
- h.services = cfg.alloc.Job.LookupTaskGroup(h.group).Services
if cfg.alloc.AllocatedResources != nil {
h.networks = cfg.alloc.AllocatedResources.Shared.Networks
@@ -84,7 +91,7 @@ func newGroupServiceHook(cfg groupServiceHookConfig) *groupServiceHook {
}
func (*groupServiceHook) Name() string {
- return "group_services"
+ return groupServiceHookName
}
func (h *groupServiceHook) Prerun() error {
@@ -220,13 +227,14 @@ func (h *groupServiceHook) getWorkloadServices() *agentconsul.WorkloadServices {
// Create task services struct with request's driver metadata
return &agentconsul.WorkloadServices{
- AllocID: h.allocID,
- Group: h.group,
- Restarter: h.restarter,
- Services: interpolatedServices,
- Networks: h.networks,
- NetworkStatus: netStatus,
- Ports: h.ports,
- Canary: h.canary,
+ AllocID: h.allocID,
+ Group: h.group,
+ ConsulNamespace: h.consulNamespace,
+ Restarter: h.restarter,
+ Services: interpolatedServices,
+ Networks: h.networks,
+ NetworkStatus: netStatus,
+ Ports: h.ports,
+ Canary: h.canary,
}
}
diff --git a/client/allocrunner/groupservice_hook_test.go b/client/allocrunner/groupservice_hook_test.go
index e1bb2ef19..59d3b7a7a 100644
--- a/client/allocrunner/groupservice_hook_test.go
+++ b/client/allocrunner/groupservice_hook_test.go
@@ -238,7 +238,9 @@ func TestGroupServiceHook_Update08Alloc(t *testing.T) {
consulConfig.Address = testconsul.HTTPAddr
consulClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
- serviceClient := agentconsul.NewServiceClient(consulClient.Agent(), testlog.HCLogger(t), true)
+ namespacesClient := agentconsul.NewNamespacesClient(consulClient.Namespaces())
+
+ serviceClient := agentconsul.NewServiceClient(consulClient.Agent(), namespacesClient, testlog.HCLogger(t), true)
// Lower periodicInterval to ensure periodic syncing doesn't improperly
// remove Connect services.
diff --git a/client/allocrunner/taskrunner/connect_native_hook_test.go b/client/allocrunner/taskrunner/connect_native_hook_test.go
index 6b722dee6..9ec54bc46 100644
--- a/client/allocrunner/taskrunner/connect_native_hook_test.go
+++ b/client/allocrunner/taskrunner/connect_native_hook_test.go
@@ -312,8 +312,9 @@ func TestTaskRunner_ConnectNativeHook_Ok(t *testing.T) {
consulConfig.Address = testConsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
+ namespacesClient := agentconsul.NewNamespacesClient(consulAPIClient.Namespaces())
- consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), logger, true)
+ consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
go consulClient.Run()
defer consulClient.Shutdown()
require.NoError(t, consulClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
@@ -376,8 +377,9 @@ func TestTaskRunner_ConnectNativeHook_with_SI_token(t *testing.T) {
consulConfig.Address = testConsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
+ namespacesClient := agentconsul.NewNamespacesClient(consulAPIClient.Namespaces())
- consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), logger, true)
+ consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
go consulClient.Run()
defer consulClient.Shutdown()
require.NoError(t, consulClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
@@ -452,8 +454,9 @@ func TestTaskRunner_ConnectNativeHook_shareTLS(t *testing.T) {
consulConfig.Address = testConsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
+ namespacesClient := agentconsul.NewNamespacesClient(consulAPIClient.Namespaces())
- consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), logger, true)
+ consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
go consulClient.Run()
defer consulClient.Shutdown()
require.NoError(t, consulClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
@@ -571,8 +574,9 @@ func TestTaskRunner_ConnectNativeHook_shareTLS_override(t *testing.T) {
consulConfig.Address = testConsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
+ namespacesClient := agentconsul.NewNamespacesClient(consulAPIClient.Namespaces())
- consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), logger, true)
+ consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
go consulClient.Run()
defer consulClient.Shutdown()
require.NoError(t, consulClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
diff --git a/client/allocrunner/taskrunner/envoy_bootstrap_hook.go b/client/allocrunner/taskrunner/envoy_bootstrap_hook.go
index 322e2639b..4489b14f6 100644
--- a/client/allocrunner/taskrunner/envoy_bootstrap_hook.go
+++ b/client/allocrunner/taskrunner/envoy_bootstrap_hook.go
@@ -49,9 +49,10 @@ func newConsulTransportConfig(consul *config.ConsulConfig) consulTransportConfig
}
type envoyBootstrapHookConfig struct {
- consul consulTransportConfig
- alloc *structs.Allocation
- logger hclog.Logger
+ alloc *structs.Allocation
+ consul consulTransportConfig
+ consulNamespace string
+ logger hclog.Logger
}
func decodeTriState(b *bool) string {
@@ -65,11 +66,12 @@ func decodeTriState(b *bool) string {
}
}
-func newEnvoyBootstrapHookConfig(alloc *structs.Allocation, consul *config.ConsulConfig, logger hclog.Logger) *envoyBootstrapHookConfig {
+func newEnvoyBootstrapHookConfig(alloc *structs.Allocation, consul *config.ConsulConfig, consulNamespace string, logger hclog.Logger) *envoyBootstrapHookConfig {
return &envoyBootstrapHookConfig{
- alloc: alloc,
- logger: logger,
- consul: newConsulTransportConfig(consul),
+ alloc: alloc,
+ consul: newConsulTransportConfig(consul),
+ consulNamespace: consulNamespace,
+ logger: logger,
}
}
@@ -95,18 +97,36 @@ type envoyBootstrapHook struct {
// before contacting Consul.
consulConfig consulTransportConfig
+ // consulNamespace is the Consul namespace as set in the job
+ consulNamespace string
+
// logger is used to log things
logger hclog.Logger
}
func newEnvoyBootstrapHook(c *envoyBootstrapHookConfig) *envoyBootstrapHook {
return &envoyBootstrapHook{
- alloc: c.alloc,
- consulConfig: c.consul,
- logger: c.logger.Named(envoyBootstrapHookName),
+ alloc: c.alloc,
+ consulConfig: c.consul,
+ consulNamespace: c.consulNamespace,
+ logger: c.logger.Named(envoyBootstrapHookName),
}
}
+// getConsulNamespace will resolve the Consul namespace, choosing between
+// - agent config (low precedence)
+// - task group config (high precedence)
+func (h *envoyBootstrapHook) getConsulNamespace() string {
+ var namespace string
+ if h.consulConfig.Namespace != "" {
+ namespace = h.consulConfig.Namespace
+ }
+ if h.consulNamespace != "" {
+ namespace = h.consulNamespace
+ }
+ return namespace
+}
+
func (envoyBootstrapHook) Name() string {
return envoyBootstrapHookName
}
@@ -355,8 +375,11 @@ func (h *envoyBootstrapHook) newEnvoyBootstrapArgs(
sidecarForID string // sidecar only
gateway string // gateway only
proxyID string // gateway only
+ namespace string
)
+ namespace = h.getConsulNamespace()
+
switch {
case service.Connect.HasSidecar():
sidecarForID = h.proxyServiceID(group, service)
@@ -372,7 +395,7 @@ func (h *envoyBootstrapHook) newEnvoyBootstrapArgs(
"sidecar_for", service.Name, "bootstrap_file", filepath,
"sidecar_for_id", sidecarForID, "grpc_addr", grpcAddr,
"admin_bind", envoyAdminBind, "gateway", gateway,
- "proxy_id", proxyID,
+ "proxy_id", proxyID, "namespace", namespace,
)
return envoyBootstrapArgs{
@@ -383,6 +406,7 @@ func (h *envoyBootstrapHook) newEnvoyBootstrapArgs(
siToken: siToken,
gateway: gateway,
proxyID: proxyID,
+ namespace: namespace,
}
}
@@ -397,6 +421,7 @@ type envoyBootstrapArgs struct {
siToken string
gateway string // gateways only
proxyID string // gateways only
+ namespace string
}
// args returns the CLI arguments consul needs in the correct order, with the
@@ -439,7 +464,7 @@ func (e envoyBootstrapArgs) args() []string {
arguments = append(arguments, "-client-key", v)
}
- if v := e.consulConfig.Namespace; v != "" {
+ if v := e.namespace; v != "" {
arguments = append(arguments, "-namespace", v)
}
@@ -462,7 +487,7 @@ func (e envoyBootstrapArgs) env(env []string) []string {
if v := e.consulConfig.VerifySSL; v != "" {
env = append(env, fmt.Sprintf("%s=%s", "CONSUL_HTTP_SSL_VERIFY", v))
}
- if v := e.consulConfig.Namespace; v != "" {
+ if v := e.namespace; v != "" {
env = append(env, fmt.Sprintf("%s=%s", "CONSUL_NAMESPACE", v))
}
return env
diff --git a/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go b/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go
index cb638e88e..927ae7b08 100644
--- a/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go
+++ b/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go
@@ -32,6 +32,12 @@ import (
var _ interfaces.TaskPrestartHook = (*envoyBootstrapHook)(nil)
+const (
+ // consulNamespace is empty string in OSS, because Consul OSS does not like
+ // having even the default namespace set.
+ consulNamespace = ""
+)
+
func writeTmp(t *testing.T, s string, fm os.FileMode) string {
dir, err := ioutil.TempDir("", "envoy-")
require.NoError(t, err)
@@ -299,8 +305,9 @@ func TestEnvoyBootstrapHook_with_SI_token(t *testing.T) {
consulConfig.Address = testConsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
+ namespacesClient := agentconsul.NewNamespacesClient(consulAPIClient.Namespaces())
- consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), logger, true)
+ consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
go consulClient.Run()
defer consulClient.Shutdown()
require.NoError(t, consulClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
@@ -308,7 +315,7 @@ func TestEnvoyBootstrapHook_with_SI_token(t *testing.T) {
// Run Connect bootstrap Hook
h := newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(alloc, &config.ConsulConfig{
Addr: consulConfig.Address,
- }, logger))
+ }, consulNamespace, logger))
req := &interfaces.TaskPrestartRequest{
Task: sidecarTask,
TaskDir: allocDir.NewTaskDir(sidecarTask.Name),
@@ -399,8 +406,9 @@ func TestTaskRunner_EnvoyBootstrapHook_sidecar_ok(t *testing.T) {
consulConfig.Address = testConsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
+ namespacesClient := agentconsul.NewNamespacesClient(consulAPIClient.Namespaces())
- consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), logger, true)
+ consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
go consulClient.Run()
defer consulClient.Shutdown()
require.NoError(t, consulClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
@@ -408,7 +416,7 @@ func TestTaskRunner_EnvoyBootstrapHook_sidecar_ok(t *testing.T) {
// Run Connect bootstrap Hook
h := newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(alloc, &config.ConsulConfig{
Addr: consulConfig.Address,
- }, logger))
+ }, consulNamespace, logger))
req := &interfaces.TaskPrestartRequest{
Task: sidecarTask,
TaskDir: allocDir.NewTaskDir(sidecarTask.Name),
@@ -463,9 +471,10 @@ func TestTaskRunner_EnvoyBootstrapHook_gateway_ok(t *testing.T) {
consulConfig.Address = testConsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
+ namespacesClient := agentconsul.NewNamespacesClient(consulAPIClient.Namespaces())
// Register Group Services
- serviceClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), logger, true)
+ serviceClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
go serviceClient.Run()
defer serviceClient.Shutdown()
require.NoError(t, serviceClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
@@ -489,7 +498,7 @@ func TestTaskRunner_EnvoyBootstrapHook_gateway_ok(t *testing.T) {
// Run Connect bootstrap hook
h := newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(alloc, &config.ConsulConfig{
Addr: consulConfig.Address,
- }, logger))
+ }, consulNamespace, logger))
req := &interfaces.TaskPrestartRequest{
Task: alloc.Job.TaskGroups[0].Tasks[0],
@@ -542,7 +551,7 @@ func TestTaskRunner_EnvoyBootstrapHook_Noop(t *testing.T) {
// not get hit.
h := newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(alloc, &config.ConsulConfig{
Addr: "http://127.0.0.2:1",
- }, logger))
+ }, consulNamespace, logger))
req := &interfaces.TaskPrestartRequest{
Task: task,
TaskDir: allocDir.NewTaskDir(task.Name),
@@ -615,7 +624,7 @@ func TestTaskRunner_EnvoyBootstrapHook_RecoverableError(t *testing.T) {
// Run Connect bootstrap Hook
h := newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(alloc, &config.ConsulConfig{
Addr: testConsul.HTTPAddr,
- }, logger))
+ }, consulNamespace, logger))
req := &interfaces.TaskPrestartRequest{
Task: sidecarTask,
TaskDir: allocDir.NewTaskDir(sidecarTask.Name),
@@ -677,12 +686,14 @@ func TestTaskRunner_EnvoyBootstrapHook_grpcAddress(t *testing.T) {
bridgeH := newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(
mock.ConnectIngressGatewayAlloc("bridge"),
new(config.ConsulConfig),
+ consulNamespace,
testlog.HCLogger(t),
))
hostH := newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(
mock.ConnectIngressGatewayAlloc("host"),
new(config.ConsulConfig),
+ consulNamespace,
testlog.HCLogger(t),
))
diff --git a/client/allocrunner/taskrunner/script_check_hook.go b/client/allocrunner/taskrunner/script_check_hook.go
index 58d891089..d41eba38d 100644
--- a/client/allocrunner/taskrunner/script_check_hook.go
+++ b/client/allocrunner/taskrunner/script_check_hook.go
@@ -34,12 +34,13 @@ type scriptCheckHookConfig struct {
// scriptCheckHook implements a task runner hook for running script
// checks in the context of a task
type scriptCheckHook struct {
- consul consul.ConsulServiceAPI
- alloc *structs.Allocation
- task *structs.Task
- logger log.Logger
- shutdownWait time.Duration // max time to wait for scripts to shutdown
- shutdownCh chan struct{} // closed when all scripts should shutdown
+ consul consul.ConsulServiceAPI
+ consulNamespace string
+ alloc *structs.Allocation
+ task *structs.Task
+ logger log.Logger
+ shutdownWait time.Duration // max time to wait for scripts to shutdown
+ shutdownCh chan struct{} // closed when all scripts should shutdown
// The following fields can be changed by Update()
driverExec tinterfaces.ScriptExecutor
@@ -59,13 +60,14 @@ type scriptCheckHook struct {
// in Poststart() or Update()
func newScriptCheckHook(c scriptCheckHookConfig) *scriptCheckHook {
h := &scriptCheckHook{
- consul: c.consul,
- alloc: c.alloc,
- task: c.task,
- scripts: make(map[string]*scriptCheck),
- runningScripts: make(map[string]*taskletHandle),
- shutdownWait: defaultShutdownWait,
- shutdownCh: make(chan struct{}),
+ consul: c.consul,
+ consulNamespace: c.alloc.Job.LookupTaskGroup(c.alloc.TaskGroup).Consul.GetNamespace(),
+ alloc: c.alloc,
+ task: c.task,
+ scripts: make(map[string]*scriptCheck),
+ runningScripts: make(map[string]*taskletHandle),
+ shutdownWait: defaultShutdownWait,
+ shutdownCh: make(chan struct{}),
}
if c.shutdownWait != 0 {
@@ -183,11 +185,12 @@ func (h *scriptCheckHook) newScriptChecks() map[string]*scriptCheck {
serviceID := agentconsul.MakeAllocServiceID(
h.alloc.ID, h.task.Name, service)
sc := newScriptCheck(&scriptCheckConfig{
+ namespace: h.consulNamespace,
allocID: h.alloc.ID,
taskName: h.task.Name,
check: check,
serviceID: serviceID,
- agent: h.consul,
+ ttlUpdater: h.consul,
driverExec: h.driverExec,
taskEnv: h.taskEnv,
logger: h.logger,
@@ -226,7 +229,7 @@ func (h *scriptCheckHook) newScriptChecks() map[string]*scriptCheck {
taskName: groupTaskName,
check: check,
serviceID: serviceID,
- agent: h.consul,
+ ttlUpdater: h.consul,
driverExec: h.driverExec,
taskEnv: h.taskEnv,
logger: h.logger,
@@ -255,19 +258,20 @@ func (*scriptCheckHook) associated(task, serviceTask, checkTask string) bool {
return false
}
-// heartbeater is the subset of consul agent functionality needed by script
+// TTLUpdater is the subset of consul agent functionality needed by script
// checks to heartbeat
-type heartbeater interface {
- UpdateTTL(id, output, status string) error
+type TTLUpdater interface {
+ UpdateTTL(id, namespace, output, status string) error
}
// scriptCheck runs script checks via a interfaces.ScriptExecutor and updates the
// appropriate check's TTL when the script succeeds.
type scriptCheck struct {
- id string
- agent heartbeater
- check *structs.ServiceCheck
- lastCheckOk bool // true if the last check was ok; otherwise false
+ id string
+ consulNamespace string
+ ttlUpdater TTLUpdater
+ check *structs.ServiceCheck
+ lastCheckOk bool // true if the last check was ok; otherwise false
tasklet
}
@@ -276,8 +280,9 @@ type scriptCheckConfig struct {
allocID string
taskName string
serviceID string
+ namespace string // consul namespace (set from the task group's Consul config)
check *structs.ServiceCheck
- agent heartbeater
+ ttlUpdater TTLUpdater
driverExec tinterfaces.ScriptExecutor
taskEnv *taskenv.TaskEnv
logger log.Logger
@@ -299,7 +304,7 @@ func newScriptCheck(config *scriptCheckConfig) *scriptCheck {
orig := config.check
sc := &scriptCheck{
- agent: config.agent,
+ ttlUpdater: config.ttlUpdater,
check: config.check.Copy(),
lastCheckOk: true, // start logging on first failure
}
@@ -325,6 +330,7 @@ func newScriptCheck(config *scriptCheckConfig) *scriptCheck {
} else {
sc.id = agentconsul.MakeCheckID(config.serviceID, sc.check)
}
+ sc.consulNamespace = config.namespace
return sc
}
@@ -394,7 +400,7 @@ const (
// service registration and the first check.
func (s *scriptCheck) updateTTL(ctx context.Context, msg, state string) error {
for attempts := 0; ; attempts++ {
- err := s.agent.UpdateTTL(s.id, msg, state)
+ err := s.ttlUpdater.UpdateTTL(s.id, s.consulNamespace, msg, state)
if err == nil {
return nil
}
diff --git a/client/allocrunner/taskrunner/script_check_hook_test.go b/client/allocrunner/taskrunner/script_check_hook_test.go
index 73ea2050e..0d50c4fc0 100644
--- a/client/allocrunner/taskrunner/script_check_hook_test.go
+++ b/client/allocrunner/taskrunner/script_check_hook_test.go
@@ -19,7 +19,7 @@ import (
"github.com/stretchr/testify/require"
)
-func newScriptMock(hb heartbeater, exec interfaces.ScriptExecutor, logger hclog.Logger, interval, timeout time.Duration) *scriptCheck {
+func newScriptMock(hb TTLUpdater, exec interfaces.ScriptExecutor, logger hclog.Logger, interval, timeout time.Duration) *scriptCheck {
script := newScriptCheck(&scriptCheckConfig{
allocID: "allocid",
taskName: "testtask",
@@ -28,7 +28,7 @@ func newScriptMock(hb heartbeater, exec interfaces.ScriptExecutor, logger hclog.
Interval: interval,
Timeout: timeout,
},
- agent: hb,
+ ttlUpdater: hb,
driverExec: exec,
taskEnv: &taskenv.TaskEnv{},
logger: logger,
@@ -39,13 +39,13 @@ func newScriptMock(hb heartbeater, exec interfaces.ScriptExecutor, logger hclog.
return script
}
-// fakeHeartbeater implements the heartbeater interface to allow mocking out
+// fakeHeartbeater implements the TTLUpdater interface to allow mocking out
// Consul in script executor tests.
type fakeHeartbeater struct {
heartbeats chan heartbeat
}
-func (f *fakeHeartbeater) UpdateTTL(checkID, output, status string) error {
+func (f *fakeHeartbeater) UpdateTTL(checkID, namespace, output, status string) error {
f.heartbeats <- heartbeat{checkID: checkID, output: output, status: status}
return nil
}
@@ -67,7 +67,7 @@ func TestScript_Exec_Cancel(t *testing.T) {
defer cancel()
logger := testlog.HCLogger(t)
- script := newScriptMock(nil, // heartbeater should never be called
+ script := newScriptMock(nil, // TTLUpdater should never be called
exec, logger, time.Hour, time.Hour)
handle := script.run()
@@ -242,10 +242,10 @@ func TestScript_TaskEnvInterpolation(t *testing.T) {
map[string]string{"SVC_NAME": "frontend"}).Build()
svcHook := newServiceHook(serviceHookConfig{
- alloc: alloc,
- task: task,
- consul: consulClient,
- logger: logger,
+ alloc: alloc,
+ task: task,
+ consulServices: consulClient,
+ logger: logger,
})
// emulate prestart having been fired
svcHook.taskEnv = env
@@ -255,7 +255,7 @@ func TestScript_TaskEnvInterpolation(t *testing.T) {
task: task,
consul: consulClient,
logger: logger,
- shutdownWait: time.Hour, // heartbeater will never be called
+ shutdownWait: time.Hour, // TTLUpdater will never be called
})
// emulate prestart having been fired
scHook.taskEnv = env
diff --git a/client/allocrunner/taskrunner/service_hook.go b/client/allocrunner/taskrunner/service_hook.go
index 86dc26c00..11883046f 100644
--- a/client/allocrunner/taskrunner/service_hook.go
+++ b/client/allocrunner/taskrunner/service_hook.go
@@ -21,9 +21,10 @@ var _ interfaces.TaskExitedHook = &serviceHook{}
var _ interfaces.TaskStopHook = &serviceHook{}
type serviceHookConfig struct {
- alloc *structs.Allocation
- task *structs.Task
- consul consul.ConsulServiceAPI
+ alloc *structs.Allocation
+ task *structs.Task
+ consulServices consul.ConsulServiceAPI
+ consulNamespace string
// Restarter is a subset of the TaskLifecycle interface
restarter agentconsul.WorkloadRestarter
@@ -32,11 +33,12 @@ type serviceHookConfig struct {
}
type serviceHook struct {
- consul consul.ConsulServiceAPI
- allocID string
- taskName string
- restarter agentconsul.WorkloadRestarter
- logger log.Logger
+ allocID string
+ taskName string
+ consulNamespace string
+ consulServices consul.ConsulServiceAPI
+ restarter agentconsul.WorkloadRestarter
+ logger log.Logger
// The following fields may be updated
driverExec tinterfaces.ScriptExecutor
@@ -58,12 +60,13 @@ type serviceHook struct {
func newServiceHook(c serviceHookConfig) *serviceHook {
h := &serviceHook{
- consul: c.consul,
- allocID: c.alloc.ID,
- taskName: c.task.Name,
- services: c.task.Services,
- restarter: c.restarter,
- ports: c.alloc.AllocatedResources.Shared.Ports,
+ allocID: c.alloc.ID,
+ taskName: c.task.Name,
+ consulServices: c.consulServices,
+ consulNamespace: c.consulNamespace,
+ services: c.task.Services,
+ restarter: c.restarter,
+ ports: c.alloc.AllocatedResources.Shared.Ports,
}
if res := c.alloc.AllocatedResources.Tasks[c.task.Name]; res != nil {
@@ -95,7 +98,7 @@ func (h *serviceHook) Poststart(ctx context.Context, req *interfaces.TaskPoststa
// Create task services struct with request's driver metadata
workloadServices := h.getWorkloadServices()
- return h.consul.RegisterWorkload(workloadServices)
+ return h.consulServices.RegisterWorkload(workloadServices)
}
func (h *serviceHook) Update(ctx context.Context, req *interfaces.TaskUpdateRequest, _ *interfaces.TaskUpdateResponse) error {
@@ -118,7 +121,7 @@ func (h *serviceHook) Update(ctx context.Context, req *interfaces.TaskUpdateRequ
// Create new task services struct with those new values
newWorkloadServices := h.getWorkloadServices()
- return h.consul.UpdateWorkload(oldWorkloadServices, newWorkloadServices)
+ return h.consulServices.UpdateWorkload(oldWorkloadServices, newWorkloadServices)
}
func (h *serviceHook) updateHookFields(req *interfaces.TaskUpdateRequest) error {
@@ -168,12 +171,12 @@ func (h *serviceHook) Exited(context.Context, *interfaces.TaskExitedRequest, *in
// deregister services from Consul.
func (h *serviceHook) deregister() {
workloadServices := h.getWorkloadServices()
- h.consul.RemoveWorkload(workloadServices)
+ h.consulServices.RemoveWorkload(workloadServices)
// Canary flag may be getting flipped when the alloc is being
// destroyed, so remove both variations of the service
workloadServices.Canary = !workloadServices.Canary
- h.consul.RemoveWorkload(workloadServices)
+ h.consulServices.RemoveWorkload(workloadServices)
h.initialRegistration = false
}
@@ -190,14 +193,15 @@ func (h *serviceHook) getWorkloadServices() *agentconsul.WorkloadServices {
// Create task services struct with request's driver metadata
return &agentconsul.WorkloadServices{
- AllocID: h.allocID,
- Task: h.taskName,
- Restarter: h.restarter,
- Services: interpolatedServices,
- DriverExec: h.driverExec,
- DriverNetwork: h.driverNet,
- Networks: h.networks,
- Canary: h.canary,
- Ports: h.ports,
+ AllocID: h.allocID,
+ Task: h.taskName,
+ ConsulNamespace: h.consulNamespace,
+ Restarter: h.restarter,
+ Services: interpolatedServices,
+ DriverExec: h.driverExec,
+ DriverNetwork: h.driverNet,
+ Networks: h.networks,
+ Canary: h.canary,
+ Ports: h.ports,
}
}
diff --git a/client/allocrunner/taskrunner/service_hook_test.go b/client/allocrunner/taskrunner/service_hook_test.go
index c9c753f7b..1b577bd54 100644
--- a/client/allocrunner/taskrunner/service_hook_test.go
+++ b/client/allocrunner/taskrunner/service_hook_test.go
@@ -23,10 +23,10 @@ func TestUpdate_beforePoststart(t *testing.T) {
c := consul.NewMockConsulServiceClient(t, logger)
hook := newServiceHook(serviceHookConfig{
- alloc: alloc,
- task: alloc.LookupTask("web"),
- consul: c,
- logger: logger,
+ alloc: alloc,
+ task: alloc.LookupTask("web"),
+ consulServices: c,
+ logger: logger,
})
require.NoError(t, hook.Update(context.Background(), &interfaces.TaskUpdateRequest{Alloc: alloc}, &interfaces.TaskUpdateResponse{}))
require.Len(t, c.GetOps(), 0)
diff --git a/client/allocrunner/taskrunner/task_runner_hooks.go b/client/allocrunner/taskrunner/task_runner_hooks.go
index 7a86adc7f..48b3c0426 100644
--- a/client/allocrunner/taskrunner/task_runner_hooks.go
+++ b/client/allocrunner/taskrunner/task_runner_hooks.go
@@ -89,26 +89,31 @@ func (tr *TaskRunner) initHooks() {
}))
}
+ // Get the consul namespace for the TG of the allocation
+ consulNamespace := tr.alloc.ConsulNamespace()
+
// If there are templates is enabled, add the hook
if len(task.Templates) != 0 {
tr.runnerHooks = append(tr.runnerHooks, newTemplateHook(&templateHookConfig{
- logger: hookLogger,
- lifecycle: tr,
- events: tr,
- templates: task.Templates,
- clientConfig: tr.clientConfig,
- envBuilder: tr.envBuilder,
+ logger: hookLogger,
+ lifecycle: tr,
+ events: tr,
+ templates: task.Templates,
+ clientConfig: tr.clientConfig,
+ envBuilder: tr.envBuilder,
+ consulNamespace: consulNamespace,
}))
}
// Always add the service hook. A task with no services on initial registration
// may be updated to include services, which must be handled with this hook.
tr.runnerHooks = append(tr.runnerHooks, newServiceHook(serviceHookConfig{
- alloc: tr.Alloc(),
- task: tr.Task(),
- consul: tr.consulServiceClient,
- restarter: tr,
- logger: hookLogger,
+ alloc: tr.Alloc(),
+ task: tr.Task(),
+ consulServices: tr.consulServiceClient,
+ consulNamespace: consulNamespace,
+ restarter: tr,
+ logger: hookLogger,
}))
// If this is a Connect sidecar proxy (or a Connect Native) service,
@@ -129,7 +134,7 @@ func (tr *TaskRunner) initHooks() {
if task.UsesConnectSidecar() {
tr.runnerHooks = append(tr.runnerHooks,
newEnvoyVersionHook(newEnvoyVersionHookConfig(alloc, tr.consulProxiesClient, hookLogger)),
- newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(alloc, tr.clientConfig.ConsulConfig, hookLogger)),
+ newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(alloc, tr.clientConfig.ConsulConfig, consulNamespace, hookLogger)),
)
} else if task.Kind.IsConnectNative() {
tr.runnerHooks = append(tr.runnerHooks, newConnectNativeHook(
diff --git a/client/allocrunner/taskrunner/task_runner_test.go b/client/allocrunner/taskrunner/task_runner_test.go
index e743c2255..f4217054d 100644
--- a/client/allocrunner/taskrunner/task_runner_test.go
+++ b/client/allocrunner/taskrunner/task_runner_test.go
@@ -1108,7 +1108,8 @@ func TestTaskRunner_CheckWatcher_Restart(t *testing.T) {
// backed by a mock consul whose checks are always unhealthy.
consulAgent := agentconsul.NewMockAgent()
consulAgent.SetStatus("critical")
- consulClient := agentconsul.NewServiceClient(consulAgent, conf.Logger, true)
+ namespacesClient := agentconsul.NewNamespacesClient(agentconsul.NewMockNamespaces(nil))
+ consulClient := agentconsul.NewServiceClient(consulAgent, namespacesClient, conf.Logger, true)
go consulClient.Run()
defer consulClient.Shutdown()
@@ -1786,7 +1787,8 @@ func TestTaskRunner_DriverNetwork(t *testing.T) {
// Use a mock agent to test for services
consulAgent := agentconsul.NewMockAgent()
- consulClient := agentconsul.NewServiceClient(consulAgent, conf.Logger, true)
+ namespacesClient := agentconsul.NewNamespacesClient(agentconsul.NewMockNamespaces(nil))
+ consulClient := agentconsul.NewServiceClient(consulAgent, namespacesClient, conf.Logger, true)
defer consulClient.Shutdown()
go consulClient.Run()
@@ -1801,7 +1803,7 @@ func TestTaskRunner_DriverNetwork(t *testing.T) {
testWaitForTaskToStart(t, tr)
testutil.WaitForResult(func() (bool, error) {
- services, _ := consulAgent.Services()
+ services, _ := consulAgent.ServicesWithFilterOpts("", nil)
if n := len(services); n != 2 {
return false, fmt.Errorf("expected 2 services, but found %d", n)
}
@@ -1852,7 +1854,7 @@ func TestTaskRunner_DriverNetwork(t *testing.T) {
return true, nil
}, func(err error) {
- services, _ := consulAgent.Services()
+ services, _ := consulAgent.ServicesWithFilterOpts("", nil)
for _, s := range services {
t.Logf(pretty.Sprint("Service: ", s))
}
diff --git a/client/allocrunner/taskrunner/template/template.go b/client/allocrunner/taskrunner/template/template.go
index 927071d43..5c6202941 100644
--- a/client/allocrunner/taskrunner/template/template.go
+++ b/client/allocrunner/taskrunner/template/template.go
@@ -85,6 +85,9 @@ type TaskTemplateManagerConfig struct {
// ClientConfig is the Nomad Client configuration
ClientConfig *config.Config
+ // ConsulNamespace is the Consul namespace for the task
+ ConsulNamespace string
+
// VaultToken is the Vault token for the task.
VaultToken string
@@ -642,7 +645,12 @@ func newRunnerConfig(config *TaskTemplateManagerConfig,
if cc.ConsulConfig != nil {
conf.Consul.Address = &cc.ConsulConfig.Addr
conf.Consul.Token = &cc.ConsulConfig.Token
- conf.Consul.Namespace = &cc.ConsulConfig.Namespace
+
+ // Get the Consul namespace from agent config. This is the lower level
+ // of precedence (beyond default).
+ if cc.ConsulConfig.Namespace != "" {
+ conf.Consul.Namespace = &cc.ConsulConfig.Namespace
+ }
if cc.ConsulConfig.EnableSSL != nil && *cc.ConsulConfig.EnableSSL {
verify := cc.ConsulConfig.VerifySSL != nil && *cc.ConsulConfig.VerifySSL
@@ -669,6 +677,12 @@ func newRunnerConfig(config *TaskTemplateManagerConfig,
}
}
+ // Get the Consul namespace from job/group config. This is the higher level
+ // of precedence if set (above agent config).
+ if config.ConsulNamespace != "" {
+ conf.Consul.Namespace = &config.ConsulNamespace
+ }
+
// Setup the Vault config
// Always set these to ensure nothing is picked up from the environment
emptyStr := ""
diff --git a/client/allocrunner/taskrunner/template_hook.go b/client/allocrunner/taskrunner/template_hook.go
index 58150bbab..8b4fb1222 100644
--- a/client/allocrunner/taskrunner/template_hook.go
+++ b/client/allocrunner/taskrunner/template_hook.go
@@ -14,6 +14,10 @@ import (
"github.com/hashicorp/nomad/nomad/structs"
)
+const (
+ templateHookName = "template"
+)
+
type templateHookConfig struct {
// logger is used to log
logger log.Logger
@@ -32,6 +36,9 @@ type templateHookConfig struct {
// envBuilder is the environment variable builder for the task.
envBuilder *taskenv.Builder
+
+ // consulNamespace is the current Consul namespace
+ consulNamespace string
}
type templateHook struct {
@@ -44,6 +51,9 @@ type templateHook struct {
templateManager *template.TaskTemplateManager
managerLock sync.Mutex
+ // consulNamespace is the current Consul namespace
+ consulNamespace string
+
// vaultToken is the current Vault token
vaultToken string
@@ -55,15 +65,15 @@ type templateHook struct {
}
func newTemplateHook(config *templateHookConfig) *templateHook {
- h := &templateHook{
- config: config,
+ return &templateHook{
+ config: config,
+ consulNamespace: config.consulNamespace,
+ logger: config.logger.Named(templateHookName),
}
- h.logger = config.logger.Named(h.Name())
- return h
}
func (*templateHook) Name() string {
- return "template"
+ return templateHookName
}
func (h *templateHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {
@@ -106,6 +116,7 @@ func (h *templateHook) newManager() (unblock chan struct{}, err error) {
Events: h.config.events,
Templates: h.config.templates,
ClientConfig: h.config.clientConfig,
+ ConsulNamespace: h.config.consulNamespace,
VaultToken: h.vaultToken,
VaultNamespace: h.vaultNamespace,
TaskDir: h.taskDir,
diff --git a/client/consul/consul.go b/client/consul/consul.go
index 2322dcafe..9459e375d 100644
--- a/client/consul/consul.go
+++ b/client/consul/consul.go
@@ -25,7 +25,7 @@ type ConsulServiceAPI interface {
AllocRegistrations(allocID string) (*consul.AllocRegistration, error)
// UpdateTTL is used to update the TTL of a check.
- UpdateTTL(id, output, status string) error
+ UpdateTTL(id, namespace, output, status string) error
}
// TokenDeriverFunc takes an allocation and a set of tasks and derives a
diff --git a/client/consul/consul_testing.go b/client/consul/consul_testing.go
index 0384e4c95..605bf96cf 100644
--- a/client/consul/consul_testing.go
+++ b/client/consul/consul_testing.go
@@ -97,12 +97,12 @@ func (m *MockConsulServiceClient) AllocRegistrations(allocID string) (*consul.Al
return nil, nil
}
-func (m *MockConsulServiceClient) UpdateTTL(checkID, output, status string) error {
+func (m *MockConsulServiceClient) UpdateTTL(checkID, namespace, output, status string) error {
// TODO(tgross): this method is here so we can implement the
// interface but the locking we need for testing creates a lot
// of opportunities for deadlocks in testing that will never
// appear in live code.
- m.logger.Trace("UpdateTTL", "check_id", checkID, "status", status)
+ m.logger.Trace("UpdateTTL", "check_id", checkID, "namespace", namespace, "status", status)
return nil
}
diff --git a/command/agent/agent.go b/command/agent/agent.go
index 32c134bb1..0f5b415a9 100644
--- a/command/agent/agent.go
+++ b/command/agent/agent.go
@@ -1158,7 +1158,8 @@ func (a *Agent) setupConsul(consulConfig *config.ConsulConfig) error {
}
// Create Consul Agent client for looking info about the agent.
consulAgentClient := consulClient.Agent()
- a.consulService = consul.NewServiceClient(consulAgentClient, a.logger, isClient)
+ namespacesClient := consul.NewNamespacesClient(consulClient.Namespaces())
+ a.consulService = consul.NewServiceClient(consulAgentClient, namespacesClient, a.logger, isClient)
a.consulProxies = consul.NewConnectProxiesClient(consulAgentClient)
// Run the Consul service client's sync'ing main loop
diff --git a/command/agent/consul/acl_testing.go b/command/agent/consul/acl_testing.go
index 0d4c2af07..8bd0821d6 100644
--- a/command/agent/consul/acl_testing.go
+++ b/command/agent/consul/acl_testing.go
@@ -40,6 +40,7 @@ const (
ExamplePolicyID1 = "a7c86856-0af5-4ab5-8834-03f4517e5564"
ExamplePolicyID2 = "ffa1b66c-967d-4468-8775-c687b5cfc16e"
ExamplePolicyID3 = "f68f0c36-51f8-4343-97dd-f0d4816c915f"
+ ExamplePolicyID4 = "1087ff34-b8a0-9bb3-9430-d2f758f52bd3"
)
func (m *MockACLsAPI) PolicyRead(policyID string, _ *api.QueryOptions) (*api.ACLPolicy, *api.QueryMeta, error) {
@@ -66,6 +67,12 @@ service "service1" { policy = "read" }
service "service2" { policy = "write" }`,
}, nil, nil
+ case ExamplePolicyID4:
+ return &api.ACLPolicy{
+ ID: ExamplePolicyID4,
+ Rules: `key_prefix "" { policy = "read" }`,
+ }, nil, nil
+
default:
return nil, nil, errors.New("no such policy")
}
@@ -120,6 +127,7 @@ const (
ExampleOperatorTokenID2 = "868cc216-e123-4c2b-b362-f4d4c087de8e"
ExampleOperatorTokenID3 = "6177d1b9-c0f6-4118-b891-d818a3cb80b1"
ExampleOperatorTokenID4 = "754ae26c-f3cc-e088-d486-9c0d20f5eaea"
+ ExampleOperatorTokenID5 = "097cbb45-506b-c79c-ec38-82eb0dc0794a"
)
var (
@@ -127,6 +135,7 @@ var (
SecretID: ExampleOperatorTokenID0,
AccessorID: "228865c6-3bf6-6683-df03-06dea2779088 ",
Description: "Operator Token 0",
+ Namespace: "default",
}
ExampleOperatorToken1 = &api.ACLToken{
@@ -136,6 +145,7 @@ var (
Policies: []*api.ACLTokenPolicyLink{{
ID: ExamplePolicyID1,
}},
+ Namespace: "default",
}
ExampleOperatorToken2 = &api.ACLToken{
@@ -145,6 +155,7 @@ var (
Policies: []*api.ACLTokenPolicyLink{{
ID: ExamplePolicyID2,
}},
+ Namespace: "default",
}
ExampleOperatorToken3 = &api.ACLToken{
@@ -154,6 +165,7 @@ var (
Policies: []*api.ACLTokenPolicyLink{{
ID: ExamplePolicyID3,
}},
+ Namespace: "default",
}
ExampleOperatorToken4 = &api.ACLToken{
@@ -165,6 +177,17 @@ var (
ID: ExampleRoleID1,
Name: "example-role-1",
}},
+ Namespace: "default",
+ }
+
+ ExampleOperatorToken5 = &api.ACLToken{
+ SecretID: ExampleOperatorTokenID5,
+ AccessorID: "cf39aad5-00c3-af23-cf0b-75d41e12f28d",
+ Description: "Operator Token 5",
+ Policies: []*api.ACLTokenPolicyLink{{
+ ID: ExamplePolicyID4,
+ }},
+ Namespace: "default",
}
)
@@ -183,6 +206,9 @@ func (m *MockACLsAPI) TokenReadSelf(q *api.QueryOptions) (*api.ACLToken, *api.Qu
case ExampleOperatorTokenID4:
return ExampleOperatorToken4, nil, nil
+ case ExampleOperatorTokenID5:
+ return ExampleOperatorToken5, nil, nil
+
default:
return nil, nil, errors.New("no such token")
}
@@ -253,6 +279,7 @@ func (m *MockACLsAPI) tokenCreate(token *api.ACLToken, _ *api.WriteOptions) (uin
SecretID: uuid.Generate(),
Description: token.Description,
ServiceIdentities: token.ServiceIdentities,
+ Namespace: token.Namespace,
CreateTime: time.Now(),
}
diff --git a/command/agent/consul/catalog_testing.go b/command/agent/consul/catalog_testing.go
index 40dcc9ab6..f34f05244 100644
--- a/command/agent/consul/catalog_testing.go
+++ b/command/agent/consul/catalog_testing.go
@@ -2,6 +2,7 @@ package consul
import (
"fmt"
+ "sort"
"sync"
"github.com/hashicorp/consul/api"
@@ -9,11 +10,48 @@ import (
"github.com/hashicorp/nomad/helper"
)
+// MockNamespaces is a mock implementation of NamespaceAPI.
+type MockNamespaces struct {
+ namespaces []*api.Namespace
+}
+
+var _ NamespaceAPI = (*MockNamespaces)(nil)
+
+// NewMockNamespaces creates a MockNamespaces with the given namespaces, and
+// will automatically add the "default" namespace if not included.
+func NewMockNamespaces(namespaces []string) *MockNamespaces {
+ list := helper.CopySliceString(namespaces)
+ if !helper.SliceStringContains(list, "default") {
+ list = append(list, "default")
+ }
+ sort.Strings(list)
+
+ data := make([]*api.Namespace, 0, len(list))
+ for _, namespace := range list {
+ data = append(data, &api.Namespace{
+ Name: namespace,
+ })
+ }
+
+ return &MockNamespaces{
+ namespaces: data,
+ }
+}
+
+// List implements NamespaceAPI
+func (m *MockNamespaces) List(*api.QueryOptions) ([]*api.Namespace, *api.QueryMeta, error) {
+ result := make([]*api.Namespace, len(m.namespaces))
+ copy(result, m.namespaces)
+ return result, new(api.QueryMeta), nil
+}
+
// MockCatalog can be used for testing where the CatalogAPI is needed.
type MockCatalog struct {
logger hclog.Logger
}
+var _ CatalogAPI = (*MockCatalog)(nil)
+
func NewMockCatalog(l hclog.Logger) *MockCatalog {
return &MockCatalog{logger: l.Named("mock_consul")}
}
@@ -31,9 +69,11 @@ func (m *MockCatalog) Service(service, tag string, q *api.QueryOptions) ([]*api.
// MockAgent is a fake in-memory Consul backend for ServiceClient.
type MockAgent struct {
- // maps of what services and checks have been registered
- services map[string]*api.AgentServiceRegistration
- checks map[string]*api.AgentCheckRegistration
+ // services tracks what services have been registered, per namespace
+ services map[string]map[string]*api.AgentServiceRegistration
+
+ // checks tracks what checks have been registered, per namespace
+ checks map[string]map[string]*api.AgentCheckRegistration
// hits is the total number of times agent methods have been called
hits int
@@ -41,19 +81,21 @@ type MockAgent struct {
// mu guards above fields
mu sync.Mutex
- // when UpdateTTL is called the check ID will have its counter inc'd
- checkTTLs map[string]int
+ // checkTTLs counts calls to UpdateTTL for each check, per namespace
+ checkTTLs map[string]map[string]int
// What check status to return from Checks()
checkStatus string
}
+var _ AgentAPI = (*MockAgent)(nil)
+
// NewMockAgent that returns all checks as passing.
func NewMockAgent() *MockAgent {
return &MockAgent{
- services: make(map[string]*api.AgentServiceRegistration),
- checks: make(map[string]*api.AgentCheckRegistration),
- checkTTLs: make(map[string]int),
+ services: make(map[string]map[string]*api.AgentServiceRegistration),
+ checks: make(map[string]map[string]*api.AgentCheckRegistration),
+ checkTTLs: make(map[string]map[string]int),
checkStatus: api.HealthPassing,
}
}
@@ -109,13 +151,23 @@ func (c *MockAgent) Self() (map[string]map[string]interface{}, error) {
return s, nil
}
-func (c *MockAgent) Services() (map[string]*api.AgentService, error) {
+func getNamespace(q *api.QueryOptions) string {
+ if q == nil || q.Namespace == "" {
+ return "default"
+ }
+ return q.Namespace
+}
+
+// ServicesWithFilterOpts implements AgentAPI
+func (c *MockAgent) ServicesWithFilterOpts(_ string, q *api.QueryOptions) (map[string]*api.AgentService, error) {
c.mu.Lock()
defer c.mu.Unlock()
+
c.hits++
+ namespace := getNamespace(q)
r := make(map[string]*api.AgentService, len(c.services))
- for k, v := range c.services {
+ for k, v := range c.services[namespace] {
r[k] = &api.AgentService{
ID: v.ID,
Service: v.Name,
@@ -130,104 +182,152 @@ func (c *MockAgent) Services() (map[string]*api.AgentService, error) {
return r, nil
}
-// Checks implements the Agent API Checks method.
-func (c *MockAgent) Checks() (map[string]*api.AgentCheck, error) {
+// ChecksWithFilterOpts implements AgentAPI
+func (c *MockAgent) ChecksWithFilterOpts(_ string, q *api.QueryOptions) (map[string]*api.AgentCheck, error) {
c.mu.Lock()
defer c.mu.Unlock()
+
c.hits++
+ namespace := getNamespace(q)
r := make(map[string]*api.AgentCheck, len(c.checks))
- for k, v := range c.checks {
+ for k, v := range c.checks[namespace] {
r[k] = &api.AgentCheck{
CheckID: v.ID,
Name: v.Name,
Status: c.checkStatus,
Notes: v.Notes,
ServiceID: v.ServiceID,
- ServiceName: c.services[v.ServiceID].Name,
+ ServiceName: c.services[namespace][v.ServiceID].Name,
}
}
return r, nil
}
// CheckRegs returns the raw AgentCheckRegistrations registered with this mock
-// agent.
+// agent, across all namespaces.
func (c *MockAgent) CheckRegs() []*api.AgentCheckRegistration {
c.mu.Lock()
defer c.mu.Unlock()
+
regs := make([]*api.AgentCheckRegistration, 0, len(c.checks))
- for _, check := range c.checks {
- regs = append(regs, check)
+ for namespace := range c.checks {
+ for _, check := range c.checks[namespace] {
+ regs = append(regs, check)
+ }
}
return regs
}
+// CheckRegister implements AgentAPI
func (c *MockAgent) CheckRegister(check *api.AgentCheckRegistration) error {
c.mu.Lock()
defer c.mu.Unlock()
c.hits++
- c.checks[check.ID] = check
+ // Consul will set empty Namespace to default, do the same here
+ if check.Namespace == "" {
+ check.Namespace = "default"
+ }
+
+ if c.checks[check.Namespace] == nil {
+ c.checks[check.Namespace] = make(map[string]*api.AgentCheckRegistration)
+ }
+ c.checks[check.Namespace][check.ID] = check
// Be nice and make checks reachable-by-service
- scheck := check.AgentServiceCheck
- c.services[check.ServiceID].Checks = append(c.services[check.ServiceID].Checks, &scheck)
+ serviceCheck := check.AgentServiceCheck
+ if c.services[check.Namespace] == nil {
+ c.services[check.Namespace] = make(map[string]*api.AgentServiceRegistration)
+ }
+ c.services[check.Namespace][check.ServiceID].Checks = append(c.services[check.Namespace][check.ServiceID].Checks, &serviceCheck)
return nil
}
-func (c *MockAgent) CheckDeregister(checkID string) error {
+// CheckDeregisterOpts implements AgentAPI
+func (c *MockAgent) CheckDeregisterOpts(checkID string, q *api.QueryOptions) error {
c.mu.Lock()
defer c.mu.Unlock()
+
c.hits++
- delete(c.checks, checkID)
- delete(c.checkTTLs, checkID)
+ namespace := getNamespace(q)
+
+ delete(c.checks[namespace], checkID)
+ delete(c.checkTTLs[namespace], checkID)
return nil
}
+// ServiceRegister implements AgentAPI
func (c *MockAgent) ServiceRegister(service *api.AgentServiceRegistration) error {
c.mu.Lock()
defer c.mu.Unlock()
+
c.hits++
- c.services[service.ID] = service
+
+ // Consul will set empty Namespace to default, do the same here
+ if service.Namespace == "" {
+ service.Namespace = "default"
+ }
+
+ if c.services[service.Namespace] == nil {
+ c.services[service.Namespace] = make(map[string]*api.AgentServiceRegistration)
+ }
+ c.services[service.Namespace][service.ID] = service
return nil
}
-func (c *MockAgent) ServiceDeregister(serviceID string) error {
+// ServiceDeregisterOpts implements AgentAPI
+func (c *MockAgent) ServiceDeregisterOpts(serviceID string, q *api.QueryOptions) error {
c.mu.Lock()
defer c.mu.Unlock()
+
c.hits++
- delete(c.services, serviceID)
- for k, v := range c.checks {
+ namespace := getNamespace(q)
+
+ delete(c.services[namespace], serviceID)
+
+ for k, v := range c.checks[namespace] {
if v.ServiceID == serviceID {
- delete(c.checks, k)
- delete(c.checkTTLs, k)
+ delete(c.checks[namespace], k)
+ delete(c.checkTTLs[namespace], k)
}
}
return nil
}
-func (c *MockAgent) UpdateTTL(id string, output string, status string) error {
+// UpdateTTLOpts implements AgentAPI
+func (c *MockAgent) UpdateTTLOpts(id string, output string, status string, q *api.QueryOptions) error {
c.mu.Lock()
defer c.mu.Unlock()
- c.hits++
- check, ok := c.checks[id]
- if !ok {
- return fmt.Errorf("unknown check id: %q", id)
+ c.hits++
+ namespace := getNamespace(q)
+
+ checks, nsExists := c.checks[namespace]
+ if !nsExists {
+ return fmt.Errorf("unknown checks namespace: %q", namespace)
}
+
+ check, checkExists := checks[id]
+ if !checkExists {
+ return fmt.Errorf("unknown check: %s/%s", namespace, id)
+ }
+
// Flip initial status to passing
+ // todo(shoenig) why not just set to the given status?
check.Status = "passing"
- c.checkTTLs[id]++
+ c.checkTTLs[namespace][id]++
+
return nil
}
// a convenience method for looking up a registered service by name
-func (c *MockAgent) lookupService(name string) []*api.AgentServiceRegistration {
+func (c *MockAgent) lookupService(namespace, name string) []*api.AgentServiceRegistration {
c.mu.Lock()
defer c.mu.Unlock()
var services []*api.AgentServiceRegistration
- for _, service := range c.services {
+ for _, service := range c.services[namespace] {
if service.Name == name {
services = append(services, service)
}
diff --git a/command/agent/consul/check_watcher.go b/command/agent/consul/check_watcher.go
index 058c298f5..295a7f4fc 100644
--- a/command/agent/consul/check_watcher.go
+++ b/command/agent/consul/check_watcher.go
@@ -18,8 +18,7 @@ const (
// ChecksAPI is the part of the Consul API the checkWatcher requires.
type ChecksAPI interface {
- // Checks returns a list of all checks.
- Checks() (map[string]*api.AgentCheck, error)
+ ChecksWithFilterOpts(filter string, q *api.QueryOptions) (map[string]*api.AgentCheck, error)
}
// WorkloadRestarter allows the checkWatcher to restart tasks or entire task groups.
@@ -141,7 +140,8 @@ type checkWatchUpdate struct {
// checkWatcher watches Consul checks and restarts tasks when they're
// unhealthy.
type checkWatcher struct {
- consul ChecksAPI
+ namespacesClient *NamespacesClient
+ checksAPI ChecksAPI
// pollFreq is how often to poll the checks API and defaults to
// defaultPollFreq
@@ -162,13 +162,14 @@ type checkWatcher struct {
}
// newCheckWatcher creates a new checkWatcher but does not call its Run method.
-func newCheckWatcher(logger log.Logger, consul ChecksAPI) *checkWatcher {
+func newCheckWatcher(logger log.Logger, checksAPI ChecksAPI, namespacesClient *NamespacesClient) *checkWatcher {
return &checkWatcher{
- consul: consul,
- pollFreq: defaultPollFreq,
- checkUpdateCh: make(chan checkWatchUpdate, 8),
- done: make(chan struct{}),
- logger: logger.ResetNamed("consul.health"),
+ namespacesClient: namespacesClient,
+ checksAPI: checksAPI,
+ pollFreq: defaultPollFreq,
+ checkUpdateCh: make(chan checkWatchUpdate, 8),
+ done: make(chan struct{}),
+ logger: logger.ResetNamed("consul.health"),
}
}
@@ -196,6 +197,7 @@ func (w *checkWatcher) Run(ctx context.Context) {
stopTimer()
// Main watch loop
+WATCHER:
for {
// disable polling if there are no checks
if len(checks) == 0 {
@@ -230,13 +232,30 @@ func (w *checkWatcher) Run(ctx context.Context) {
// Set "now" as the point in time the following check results represent
now := time.Now()
- results, err := w.consul.Checks()
+ // Get the list of all namespaces so we can iterate them.
+ namespaces, err := w.namespacesClient.List()
if err != nil {
if !w.lastErr {
w.lastErr = true
- w.logger.Error("failed retrieving health checks", "error", err)
+ w.logger.Error("failed retrieving namespaces", "error", err)
+ }
+ continue WATCHER
+ }
+
+ checkResults := make(map[string]*api.AgentCheck)
+ for _, namespace := range namespaces {
+ nsResults, err := w.checksAPI.ChecksWithFilterOpts("", &api.QueryOptions{Namespace: normalizeNamespace(namespace)})
+ if err != nil {
+ if !w.lastErr {
+ w.lastErr = true
+ w.logger.Error("failed retrieving health checks", "error", err)
+ }
+ continue WATCHER
+ } else {
+ for k, v := range nsResults {
+ checkResults[k] = v
+ }
}
- continue
}
w.lastErr = false
@@ -259,11 +278,11 @@ func (w *checkWatcher) Run(ctx context.Context) {
continue
}
- result, ok := results[cid]
+ result, ok := checkResults[cid]
if !ok {
// Only warn if outside grace period to avoid races with check registration
if now.After(check.graceUntil) {
- w.logger.Warn("watched check not found in Consul", "check", check.checkName, "check_id", cid)
+ w.logger.Warn("watched check not found in Consul", "check", check.checkName, "check_id", cid)
}
continue
}
diff --git a/command/agent/consul/check_watcher_test.go b/command/agent/consul/check_watcher_test.go
index e62c19f7b..0267699c3 100644
--- a/command/agent/consul/check_watcher_test.go
+++ b/command/agent/consul/check_watcher_test.go
@@ -123,7 +123,7 @@ func (c *fakeChecksAPI) add(id, status string, at time.Time) {
c.mu.Unlock()
}
-func (c *fakeChecksAPI) Checks() (map[string]*api.AgentCheck, error) {
+func (c *fakeChecksAPI) ChecksWithFilterOpts(filter string, opts *api.QueryOptions) (map[string]*api.AgentCheck, error) {
c.mu.Lock()
defer c.mu.Unlock()
now := time.Now()
@@ -149,10 +149,12 @@ func (c *fakeChecksAPI) Checks() (map[string]*api.AgentCheck, error) {
// testWatcherSetup sets up a fakeChecksAPI and a real checkWatcher with a test
// logger and faster poll frequency.
func testWatcherSetup(t *testing.T) (*fakeChecksAPI, *checkWatcher) {
- fakeAPI := newFakeChecksAPI()
- cw := newCheckWatcher(testlog.HCLogger(t), fakeAPI)
+ logger := testlog.HCLogger(t)
+ checksAPI := newFakeChecksAPI()
+ namespacesClient := NewNamespacesClient(NewMockNamespaces(nil))
+ cw := newCheckWatcher(logger, checksAPI, namespacesClient)
cw.pollFreq = 10 * time.Millisecond
- return fakeAPI, cw
+ return checksAPI, cw
}
func testCheck() *structs.ServiceCheck {
@@ -176,7 +178,11 @@ func TestCheckWatcher_Skip(t *testing.T) {
check := testCheck()
check.CheckRestart = nil
- cw := newCheckWatcher(testlog.HCLogger(t), newFakeChecksAPI())
+ logger := testlog.HCLogger(t)
+ checksAPI := newFakeChecksAPI()
+ namespacesClient := NewNamespacesClient(NewMockNamespaces(nil))
+
+ cw := newCheckWatcher(logger, checksAPI, namespacesClient)
restarter1 := newFakeCheckRestarter(cw, "testalloc1", "testtask1", "testcheck1", check)
cw.Watch("testalloc1", "testtask1", "testcheck1", check, restarter1)
diff --git a/command/agent/consul/group_test.go b/command/agent/consul/group_test.go
index 9c38207fd..2dd506a2e 100644
--- a/command/agent/consul/group_test.go
+++ b/command/agent/consul/group_test.go
@@ -32,7 +32,8 @@ func TestConsul_Connect(t *testing.T) {
consulConfig.Address = testconsul.HTTPAddr
consulClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
- serviceClient := NewServiceClient(consulClient.Agent(), testlog.HCLogger(t), true)
+ namespacesClient := NewNamespacesClient(consulClient.Namespaces())
+ serviceClient := NewServiceClient(consulClient.Agent(), namespacesClient, testlog.HCLogger(t), true)
// Lower periodicInterval to ensure periodic syncing doesn't improperly
// remove Connect services.
diff --git a/command/agent/consul/int_test.go b/command/agent/consul/int_test.go
index b4760a768..318a46f34 100644
--- a/command/agent/consul/int_test.go
+++ b/command/agent/consul/int_test.go
@@ -135,7 +135,8 @@ func TestConsul_Integration(t *testing.T) {
consulClient, err := consulapi.NewClient(consulConfig)
r.Nil(err)
- serviceClient := consul.NewServiceClient(consulClient.Agent(), testlog.HCLogger(t), true)
+ namespacesClient := consul.NewNamespacesClient(consulClient.Namespaces())
+ serviceClient := consul.NewServiceClient(consulClient.Agent(), namespacesClient, testlog.HCLogger(t), true)
defer serviceClient.Shutdown() // just-in-case cleanup
consulRan := make(chan struct{})
go func() {
diff --git a/command/agent/consul/namespaces_client.go b/command/agent/consul/namespaces_client.go
new file mode 100644
index 000000000..0bcbb10e2
--- /dev/null
+++ b/command/agent/consul/namespaces_client.go
@@ -0,0 +1,42 @@
+package consul
+
+import (
+ "sort"
+ "strings"
+)
+
+// NamespacesClient is a wrapper for the Consul NamespacesAPI, that is used to
+// deal with Consul OSS vs Consul Enterprise behavior in listing namespaces.
+type NamespacesClient struct {
+ namespacesAPI NamespaceAPI
+}
+
+// NewNamespacesClient returns a NamespacesClient backed by a NamespaceAPI.
+func NewNamespacesClient(namespacesAPI NamespaceAPI) *NamespacesClient {
+ return &NamespacesClient{
+ namespacesAPI: namespacesAPI,
+ }
+}
+
+// List returns a list of Consul Namespaces.
+//
+// If using Consul OSS, the list is a single element with the "default" namespace,
+// even though the response from Consul OSS is an error.
+func (ns *NamespacesClient) List() ([]string, error) {
+ namespaces, _, err := ns.namespacesAPI.List(nil)
+ if err != nil {
+ // check if the error was a 404, indicating Consul is the OSS version
+ // which does not have the /v1/namespace handler
+ if strings.Contains(err.Error(), "response code: 404") {
+ return []string{"default"}, nil
+ }
+ return nil, err
+ }
+
+ result := make([]string, 0, len(namespaces))
+ for _, namespace := range namespaces {
+ result = append(result, namespace.Name)
+ }
+ sort.Strings(result)
+ return result, nil
+}
diff --git a/command/agent/consul/service_client.go b/command/agent/consul/service_client.go
index 66c00225c..9c451754b 100644
--- a/command/agent/consul/service_client.go
+++ b/command/agent/consul/service_client.go
@@ -12,8 +12,9 @@ import (
"sync/atomic"
"time"
- metrics "github.com/armon/go-metrics"
+ "github.com/armon/go-metrics"
log "github.com/hashicorp/go-hclog"
+ "github.com/pkg/errors"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/nomad/helper"
@@ -89,20 +90,28 @@ type CatalogAPI interface {
Service(service, tag string, q *api.QueryOptions) ([]*api.CatalogService, *api.QueryMeta, error)
}
+// NamespaceAPI is the consul/api.Namespace API used by Nomad.
+//
+// ACL requirements
+// - operator:read OR namespace:*:read
+type NamespaceAPI interface {
+ List(q *api.QueryOptions) ([]*api.Namespace, *api.QueryMeta, error)
+}
+
// AgentAPI is the consul/api.Agent API used by Nomad.
//
// ACL requirements
// - agent:read
// - service:write
type AgentAPI interface {
- Services() (map[string]*api.AgentService, error)
- Checks() (map[string]*api.AgentCheck, error)
+ ServicesWithFilterOpts(filter string, q *api.QueryOptions) (map[string]*api.AgentService, error)
+ ChecksWithFilterOpts(filter string, q *api.QueryOptions) (map[string]*api.AgentCheck, error)
CheckRegister(check *api.AgentCheckRegistration) error
- CheckDeregister(checkID string) error
+ CheckDeregisterOpts(checkID string, q *api.QueryOptions) error
Self() (map[string]map[string]interface{}, error)
ServiceRegister(service *api.AgentServiceRegistration) error
- ServiceDeregister(serviceID string) error
- UpdateTTL(id, output, status string) error
+ ServiceDeregisterOpts(serviceID string, q *api.QueryOptions) error
+ UpdateTTLOpts(id, output, status string, q *api.QueryOptions) error
}
// ConfigAPI is the consul/api.ConfigEntries API subset used by Nomad Server.
@@ -373,7 +382,9 @@ func (s *ServiceRegistration) copy() *ServiceRegistration {
// ServiceClient handles task and agent service registration with Consul.
type ServiceClient struct {
- client AgentAPI
+ agentAPI AgentAPI
+ namespacesClient *NamespacesClient
+
logger log.Logger
retryInterval time.Duration
maxRetryInterval time.Duration
@@ -402,8 +413,8 @@ type ServiceClient struct {
allocRegistrations map[string]*AllocRegistration
allocRegistrationsLock sync.RWMutex
- // agent services and checks record entries for the agent itself which
- // should be removed on shutdown
+ // Nomad agent services and checks that are recorded so they can be removed
+ // on shutdown. Defers to consul namespace specified in client consul config.
agentServices map[string]struct{}
agentChecks map[string]struct{}
agentLock sync.Mutex
@@ -429,10 +440,11 @@ type ServiceClient struct {
// Client, logger and takes whether the client is being used by a Nomad Client agent.
// When being used by a Nomad client, this Consul client reconciles all services and
// checks created by Nomad on behalf of running tasks.
-func NewServiceClient(consulClient AgentAPI, logger log.Logger, isNomadClient bool) *ServiceClient {
+func NewServiceClient(agentAPI AgentAPI, namespacesClient *NamespacesClient, logger log.Logger, isNomadClient bool) *ServiceClient {
logger = logger.ResetNamed("consul.sync")
return &ServiceClient{
- client: consulClient,
+ agentAPI: agentAPI,
+ namespacesClient: namespacesClient,
logger: logger,
retryInterval: defaultRetryInterval,
maxRetryInterval: defaultMaxRetryInterval,
@@ -448,7 +460,7 @@ func NewServiceClient(consulClient AgentAPI, logger log.Logger, isNomadClient bo
allocRegistrations: make(map[string]*AllocRegistration),
agentServices: make(map[string]struct{}),
agentChecks: make(map[string]struct{}),
- checkWatcher: newCheckWatcher(logger, consulClient),
+ checkWatcher: newCheckWatcher(logger, agentAPI, namespacesClient),
isClientAgent: isNomadClient,
deregisterProbationExpiry: time.Now().Add(deregisterProbationPeriod),
}
@@ -492,7 +504,7 @@ func (c *ServiceClient) Run() {
// init will be closed when Consul has been contacted
init := make(chan struct{})
- go checkConsulTLSSkipVerify(ctx, c.logger, c.client, init)
+ go checkConsulTLSSkipVerify(ctx, c.logger, c.agentAPI, init)
// Process operations while waiting for initial contact with Consul but
// do not sync until contact has been made.
@@ -604,8 +616,8 @@ func (c *ServiceClient) commit(ops *operations) {
}
func (c *ServiceClient) clearExplicitlyDeregistered() {
- c.explicitlyDeregisteredServices = map[string]bool{}
- c.explicitlyDeregisteredChecks = map[string]bool{}
+ c.explicitlyDeregisteredServices = make(map[string]bool)
+ c.explicitlyDeregisteredChecks = make(map[string]bool)
}
// merge registrations into state map prior to sync'ing with Consul
@@ -631,17 +643,34 @@ func (c *ServiceClient) merge(ops *operations) {
// sync enqueued operations.
func (c *ServiceClient) sync(reason syncReason) error {
sreg, creg, sdereg, cdereg := 0, 0, 0, 0
+ var err error
- consulServices, err := c.client.Services()
+ // Get the list of all namespaces created so we can iterate them.
+ namespaces, err := c.namespacesClient.List()
if err != nil {
metrics.IncrCounter([]string{"client", "consul", "sync_failure"}, 1)
- return fmt.Errorf("error querying Consul services: %v", err)
+ return errors.Wrap(err, "failed to query Consul namespaces")
}
+ // Accumulate all services in Consul across all namespaces.
+ servicesInConsul := make(map[string]*api.AgentService)
+ for _, namespace := range namespaces {
+ if nsServices, err := c.agentAPI.ServicesWithFilterOpts("", &api.QueryOptions{Namespace: normalizeNamespace(namespace)}); err != nil {
+ metrics.IncrCounter([]string{"client", "consul", "sync_failure"}, 1)
+ return errors.Wrap(err, "failed to query Consul services")
+ } else {
+ for k, v := range nsServices {
+ servicesInConsul[k] = v
+ }
+ }
+ }
+
+ // Compute whether we are still in probation period where we will avoid
+ // de-registering services.
inProbation := time.Now().Before(c.deregisterProbationExpiry)
- // Remove Nomad services in Consul but unknown locally
- for id := range consulServices {
+ // Remove Nomad services in Consul but unknown to Nomad.
+ for id := range servicesInConsul {
if _, ok := c.services[id]; ok {
// Known service, skip
continue
@@ -667,7 +696,8 @@ func (c *ServiceClient) sync(reason syncReason) error {
}
// Unknown Nomad managed service; kill
- if err := c.client.ServiceDeregister(id); err != nil {
+ ns := servicesInConsul[id].Namespace
+ if err := c.agentAPI.ServiceDeregisterOpts(id, &api.QueryOptions{Namespace: ns}); err != nil {
if isOldNomadService(id) {
// Don't hard-fail on old entries. See #3620
continue
@@ -683,11 +713,11 @@ func (c *ServiceClient) sync(reason syncReason) error {
// Add Nomad services missing from Consul, or where the service has been updated.
for id, serviceInNomad := range c.services {
- serviceInConsul, exists := consulServices[id]
- sidecarInConsul := getNomadSidecar(id, consulServices)
+ serviceInConsul, exists := servicesInConsul[id]
+ sidecarInConsul := getNomadSidecar(id, servicesInConsul)
if !exists || agentServiceUpdateRequired(reason, serviceInNomad, serviceInConsul, sidecarInConsul) {
- if err = c.client.ServiceRegister(serviceInNomad); err != nil {
+ if err = c.agentAPI.ServiceRegister(serviceInNomad); err != nil {
metrics.IncrCounter([]string{"client", "consul", "sync_failure"}, 1)
return err
}
@@ -697,14 +727,20 @@ func (c *ServiceClient) sync(reason syncReason) error {
}
- consulChecks, err := c.client.Checks()
- if err != nil {
- metrics.IncrCounter([]string{"client", "consul", "sync_failure"}, 1)
- return fmt.Errorf("error querying Consul checks: %v", err)
+ checksInConsul := make(map[string]*api.AgentCheck)
+ for _, namespace := range namespaces {
+ nsChecks, err := c.agentAPI.ChecksWithFilterOpts("", &api.QueryOptions{Namespace: normalizeNamespace(namespace)})
+ if err != nil {
+ metrics.IncrCounter([]string{"client", "consul", "sync_failure"}, 1)
+ return errors.Wrap(err, "failed to query Consul checks")
+ }
+ for k, v := range nsChecks {
+ checksInConsul[k] = v
+ }
}
// Remove Nomad checks in Consul but unknown locally
- for id, check := range consulChecks {
+ for id, check := range checksInConsul {
if _, ok := c.checks[id]; ok {
// Known check, leave it
continue
@@ -730,7 +766,7 @@ func (c *ServiceClient) sync(reason syncReason) error {
}
// Unknown Nomad managed check; remove
- if err := c.client.CheckDeregister(id); err != nil {
+ if err := c.agentAPI.CheckDeregisterOpts(id, &api.QueryOptions{Namespace: check.Namespace}); err != nil {
if isOldNomadService(check.ServiceID) {
// Don't hard-fail on old entries.
continue
@@ -745,12 +781,11 @@ func (c *ServiceClient) sync(reason syncReason) error {
// Add Nomad checks missing from Consul
for id, check := range c.checks {
- if _, ok := consulChecks[id]; ok {
+ if _, ok := checksInConsul[id]; ok {
// Already in Consul; skipping
continue
}
-
- if err := c.client.CheckRegister(check); err != nil {
+ if err := c.agentAPI.CheckRegister(check); err != nil {
metrics.IncrCounter([]string{"client", "consul", "sync_failure"}, 1)
return err
}
@@ -820,7 +855,7 @@ func (c *ServiceClient) RegisterAgent(role string, services []*structs.Service)
}
checkHost, checkPort = host, port
}
- checkReg, err := createCheckReg(id, checkID, check, checkHost, checkPort)
+ checkReg, err := createCheckReg(id, checkID, check, checkHost, checkPort, "") // TODO(consul-namespaces): agent checks are always registered in the default namespace; revisit if agent-level namespace configuration is added
if err != nil {
return fmt.Errorf("failed to add check %q: %v", check.Name, err)
}
@@ -934,6 +969,7 @@ func (c *ServiceClient) serviceRegs(ops *operations, service *structs.Service, w
Kind: kind,
ID: id,
Name: service.Name,
+ Namespace: workload.ConsulNamespace,
Tags: tags,
EnableTagOverride: service.EnableTagOverride,
Address: ip,
@@ -986,12 +1022,11 @@ func (c *ServiceClient) checkRegs(serviceID string, service *structs.Service,
}
checkID := MakeCheckID(serviceID, check)
- registration, err := createCheckReg(serviceID, checkID, check, ip, port)
+ registration, err := createCheckReg(serviceID, checkID, check, ip, port, workload.ConsulNamespace)
if err != nil {
return nil, fmt.Errorf("failed to add check %q: %v", check.Name, err)
}
sreg.CheckOnUpdate[checkID] = check.OnUpdate
-
registrations = append(registrations, registration)
}
@@ -1193,8 +1228,18 @@ func (c *ServiceClient) RemoveWorkload(workload *WorkloadServices) {
c.commit(&ops)
}
+// normalizeNamespace will turn the "default" namespace into the empty string,
+// so that Consul OSS will not produce an error setting something in the default
+// namespace.
+func normalizeNamespace(namespace string) string {
+ if namespace == "default" {
+ return ""
+ }
+ return namespace
+}
+
// AllocRegistrations returns the registrations for the given allocation. If the
-// allocation has no reservations, the response is a nil object.
+// allocation has no registrations, the response is a nil object.
func (c *ServiceClient) AllocRegistrations(allocID string) (*AllocRegistration, error) {
// Get the internal struct using the lock
c.allocRegistrationsLock.RLock()
@@ -1208,15 +1253,32 @@ func (c *ServiceClient) AllocRegistrations(allocID string) (*AllocRegistration,
reg := regInternal.copy()
c.allocRegistrationsLock.RUnlock()
- // Query the services and checks to populate the allocation registrations.
- services, err := c.client.Services()
+ // Get the list of all namespaces created so we can iterate them.
+ namespaces, err := c.namespacesClient.List()
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "failed to retrieve namespaces from consul")
}
- checks, err := c.client.Checks()
- if err != nil {
- return nil, err
+ services := make(map[string]*api.AgentService)
+ checks := make(map[string]*api.AgentCheck)
+
+ // Query the services and checks to populate the allocation registrations.
+ for _, namespace := range namespaces {
+ nsServices, err := c.agentAPI.ServicesWithFilterOpts("", &api.QueryOptions{Namespace: normalizeNamespace(namespace)})
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to retrieve services from consul")
+ }
+ for k, v := range nsServices {
+ services[k] = v
+ }
+
+ nsChecks, err := c.agentAPI.ChecksWithFilterOpts("", &api.QueryOptions{Namespace: normalizeNamespace(namespace)})
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to retrieve checks from consul")
+ }
+ for k, v := range nsChecks {
+ checks[k] = v
+ }
}
// Populate the object
@@ -1236,8 +1298,8 @@ func (c *ServiceClient) AllocRegistrations(allocID string) (*AllocRegistration,
// UpdateTTL is used to update the TTL of a check. Typically this will only be
// called to heartbeat script checks.
-func (c *ServiceClient) UpdateTTL(id, output, status string) error {
- return c.client.UpdateTTL(id, output, status)
+func (c *ServiceClient) UpdateTTL(id, namespace, output, status string) error {
+ return c.agentAPI.UpdateTTLOpts(id, output, status, &api.QueryOptions{Namespace: normalizeNamespace(namespace)})
}
// Shutdown the Consul client. Update running task registrations and deregister
@@ -1273,14 +1335,25 @@ func (c *ServiceClient) Shutdown() error {
// Always attempt to deregister Nomad agent Consul entries, even if
// deadline was reached
for id := range c.agentServices {
- if err := c.client.ServiceDeregister(id); err != nil {
+ if err := c.agentAPI.ServiceDeregisterOpts(id, nil); err != nil {
c.logger.Error("failed deregistering agent service", "service_id", id, "error", err)
}
}
- remainingChecks, err := c.client.Checks()
+ namespaces, err := c.namespacesClient.List()
if err != nil {
- c.logger.Error("failed listing remaining checks after deregistering services", "error", err)
+ c.logger.Error("failed to retrieve namespaces from consul", "error", err)
+ }
+
+ remainingChecks := make(map[string]*api.AgentCheck)
+ for _, namespace := range namespaces {
+ nsChecks, err := c.agentAPI.ChecksWithFilterOpts("", &api.QueryOptions{Namespace: normalizeNamespace(namespace)})
+ if err != nil {
+ c.logger.Error("failed to retrieve checks from consul", "error", err)
+ }
+ for k, v := range nsChecks {
+ remainingChecks[k] = v
+ }
}
checkRemains := func(id string) bool {
@@ -1296,7 +1369,11 @@ func (c *ServiceClient) Shutdown() error {
// if we couldn't populate remainingChecks it is unlikely that CheckDeregister will work, but try anyway
// if we could list the remaining checks, verify that the check we store still exists before removing it.
- if remainingChecks == nil || checkRemains(id) {
- if err := c.client.CheckDeregister(id); err != nil {
+ if len(remainingChecks) == 0 || checkRemains(id) {
+ var ns string
+ if chk, ok := remainingChecks[id]; ok { // entry may be absent if listing checks failed
+ ns = chk.Namespace
+ }
+ if err := c.agentAPI.CheckDeregisterOpts(id, &api.QueryOptions{Namespace: ns}); err != nil {
c.logger.Error("failed deregistering agent check", "check_id", id, "error", err)
}
}
@@ -1370,11 +1444,12 @@ func MakeCheckID(serviceID string, check *structs.ServiceCheck) string {
//
// Script checks simply have a TTL set and the caller is responsible for
// running the script and heart-beating.
-func createCheckReg(serviceID, checkID string, check *structs.ServiceCheck, host string, port int) (*api.AgentCheckRegistration, error) {
+func createCheckReg(serviceID, checkID string, check *structs.ServiceCheck, host string, port int, namespace string) (*api.AgentCheckRegistration, error) {
chkReg := api.AgentCheckRegistration{
ID: checkID,
Name: check.Name,
ServiceID: serviceID,
+ Namespace: normalizeNamespace(namespace),
}
chkReg.Status = check.InitialStatus
chkReg.Timeout = check.Timeout.String()
diff --git a/command/agent/consul/service_client_test.go b/command/agent/consul/service_client_test.go
index ddd6fdf02..6bc91aaf3 100644
--- a/command/agent/consul/service_client_test.go
+++ b/command/agent/consul/service_client_test.go
@@ -336,9 +336,10 @@ func TestSyncLogic_maybeTweakTags_emptySC(t *testing.T) {
func TestServiceRegistration_CheckOnUpdate(t *testing.T) {
t.Parallel()
- mock := NewMockAgent()
+ mockAgent := NewMockAgent()
+ namespacesClient := NewNamespacesClient(NewMockNamespaces(nil))
logger := testlog.HCLogger(t)
- sc := NewServiceClient(mock, logger, true)
+ sc := NewServiceClient(mockAgent, namespacesClient, logger, true)
allocID := uuid.Generate()
ws := &WorkloadServices{
diff --git a/command/agent/consul/structs.go b/command/agent/consul/structs.go
index 4d71bcdb7..1163d8c59 100644
--- a/command/agent/consul/structs.go
+++ b/command/agent/consul/structs.go
@@ -9,18 +9,21 @@ import (
)
// WorkloadServices describes services defined in either a Task or TaskGroup
-// that need to be syncronized with Consul
+// that need to be synchronized with Consul.
type WorkloadServices struct {
AllocID string
// Name of the task and task group the services are defined for. For
- // group based services, Task will be empty
+ // group based services, Task will be empty.
Task string
Group string
- // Canary indicates whether or not the allocation is a canary
+ // Canary indicates whether or not the allocation is a canary.
Canary bool
+ // ConsulNamespace is the consul namespace in which services will be registered.
+ ConsulNamespace string
+
// Restarter allows restarting the task or task group depending on the
// check_restart stanzas.
Restarter WorkloadRestarter
@@ -32,16 +35,16 @@ type WorkloadServices struct {
// TODO: remove and use Ports
Networks structs.Networks
- // NetworkStatus from alloc if network namespace is created
- // Can be nil
+ // NetworkStatus from alloc if network namespace is created.
+ // Can be nil.
NetworkStatus *structs.AllocNetworkStatus
- // AllocatedPorts is the list of port mappings
+ // AllocatedPorts is the list of port mappings.
Ports structs.AllocatedPorts
// DriverExec is the script executor for the task's driver.
// For group services this is nil and script execution is managed by
- // a tasklet in the taskrunner script_check_hook
+ // a tasklet in the taskrunner script_check_hook.
DriverExec interfaces.ScriptExecutor
// DriverNetwork is the network specified by the driver and may be nil.
diff --git a/command/agent/consul/unit_test.go b/command/agent/consul/unit_test.go
index 294f5f0c7..b9c385ae4 100644
--- a/command/agent/consul/unit_test.go
+++ b/command/agent/consul/unit_test.go
@@ -106,10 +106,11 @@ func (t *testFakeCtx) syncOnce(reason syncReason) error {
// A test Workload is also provided.
func setupFake(t *testing.T) *testFakeCtx {
fc := NewMockAgent()
+ nsc := NewNamespacesClient(NewMockNamespaces(nil))
tw := testWorkload()
// by default start fake client being out of probation
- sc := NewServiceClient(fc, testlog.HCLogger(t), true)
+ sc := NewServiceClient(fc, nsc, testlog.HCLogger(t), true)
sc.deregisterProbationExpiry = time.Now().Add(-1 * time.Minute)
return &testFakeCtx{
@@ -135,7 +136,7 @@ func TestConsul_ChangeTags(t *testing.T) {
r.Equal(1, reg1.NumServices())
r.Equal(0, reg1.NumChecks())
- serviceBefore := ctx.FakeConsul.lookupService("taskname-service")[0]
+ serviceBefore := ctx.FakeConsul.lookupService("default", "taskname-service")[0]
r.Equal(serviceBefore.Name, ctx.Workload.Services[0].Name)
r.Equal(serviceBefore.Tags, ctx.Workload.Services[0].Tags)
@@ -149,7 +150,7 @@ func TestConsul_ChangeTags(t *testing.T) {
r.Equal(1, len(ctx.FakeConsul.services), "Expected 1 service to be registered with Consul")
// Validate the consul service definition changed
- serviceAfter := ctx.FakeConsul.lookupService("taskname-service")[0]
+ serviceAfter := ctx.FakeConsul.lookupService("default", "taskname-service")[0]
r.Equal(serviceAfter.Name, ctx.Workload.Services[0].Name)
r.Equal(serviceAfter.Tags, ctx.Workload.Services[0].Tags)
r.Equal("new-tag", serviceAfter.Tags[0])
@@ -177,25 +178,25 @@ func TestConsul_EnableTagOverride_Syncs(t *testing.T) {
const service = "taskname-service"
// check things are what we expect
- consulServiceDefBefore := ctx.FakeConsul.lookupService(service)[0]
+ consulServiceDefBefore := ctx.FakeConsul.lookupService("default", service)[0]
r.Equal(ctx.Workload.Services[0].Name, consulServiceDefBefore.Name)
r.Equal([]string{"tag1", "tag2"}, consulServiceDefBefore.Tags)
r.True(consulServiceDefBefore.EnableTagOverride)
// manually set the tags in consul
- ctx.FakeConsul.lookupService(service)[0].Tags = []string{"new", "tags"}
+ ctx.FakeConsul.lookupService("default", service)[0].Tags = []string{"new", "tags"}
// do a periodic sync (which will respect EnableTagOverride)
r.NoError(ctx.syncOnce(syncPeriodic))
r.Equal(1, len(ctx.FakeConsul.services))
- consulServiceDefAfter := ctx.FakeConsul.lookupService(service)[0]
+ consulServiceDefAfter := ctx.FakeConsul.lookupService("default", service)[0]
r.Equal([]string{"new", "tags"}, consulServiceDefAfter.Tags) // manually set tags should still be there
// now do a new-ops sync (which will override EnableTagOverride)
r.NoError(ctx.ServiceClient.RegisterWorkload(ctx.Workload))
r.NoError(ctx.syncOnce(syncNewOps))
r.Equal(1, len(ctx.FakeConsul.services))
- consulServiceDefUpdated := ctx.FakeConsul.lookupService(service)[0]
+ consulServiceDefUpdated := ctx.FakeConsul.lookupService("default", service)[0]
r.Equal([]string{"tag1", "tag2"}, consulServiceDefUpdated.Tags) // jobspec tags should be set now
}
@@ -233,20 +234,20 @@ func TestConsul_ChangePorts(t *testing.T) {
require.NoError(ctx.ServiceClient.RegisterWorkload(ctx.Workload))
require.NoError(ctx.syncOnce(syncNewOps))
- require.Equal(1, len(ctx.FakeConsul.services), "Expected 1 service to be registered with Consul")
+ require.Equal(1, len(ctx.FakeConsul.services["default"]), "Expected 1 service to be registered with Consul")
- for _, v := range ctx.FakeConsul.services {
+ for _, v := range ctx.FakeConsul.services["default"] {
require.Equal(ctx.Workload.Services[0].Name, v.Name)
require.Equal(ctx.Workload.Services[0].Tags, v.Tags)
require.Equal(xPort, v.Port)
}
- require.Len(ctx.FakeConsul.checks, 3)
+ require.Len(ctx.FakeConsul.checks["default"], 3)
origTCPKey := ""
origScriptKey := ""
origHTTPKey := ""
- for k, v := range ctx.FakeConsul.checks {
+ for k, v := range ctx.FakeConsul.checks["default"] {
switch v.Name {
case "c1":
origTCPKey = k
@@ -295,17 +296,17 @@ func TestConsul_ChangePorts(t *testing.T) {
require.NoError(ctx.ServiceClient.UpdateWorkload(origWorkload, ctx.Workload))
require.NoError(ctx.syncOnce(syncNewOps))
- require.Equal(1, len(ctx.FakeConsul.services), "Expected 1 service to be registered with Consul")
+ require.Equal(1, len(ctx.FakeConsul.services["default"]), "Expected 1 service to be registered with Consul")
- for _, v := range ctx.FakeConsul.services {
+ for _, v := range ctx.FakeConsul.services["default"] {
require.Equal(ctx.Workload.Services[0].Name, v.Name)
require.Equal(ctx.Workload.Services[0].Tags, v.Tags)
require.Equal(yPort, v.Port)
}
- require.Equal(3, len(ctx.FakeConsul.checks))
+ require.Equal(3, len(ctx.FakeConsul.checks["default"]))
- for k, v := range ctx.FakeConsul.checks {
+ for k, v := range ctx.FakeConsul.checks["default"] {
switch v.Name {
case "c1":
// C1 is changed because the service was re-registered
@@ -348,8 +349,8 @@ func TestConsul_ChangeChecks(t *testing.T) {
t.Fatalf("unexpected error syncing task: %v", err)
}
- if n := len(ctx.FakeConsul.services); n != 1 {
- t.Fatalf("expected 1 service but found %d:\n%#v", n, ctx.FakeConsul.services)
+ if n := len(ctx.FakeConsul.services["default"]); n != 1 {
+ t.Fatalf("expected 1 service but found %d:\n%#v", n, ctx.FakeConsul.services["default"])
}
// Assert a check restart watch update was enqueued and clear it
@@ -376,7 +377,7 @@ func TestConsul_ChangeChecks(t *testing.T) {
}
origServiceKey := ""
- for k, v := range ctx.FakeConsul.services {
+ for k, v := range ctx.FakeConsul.services["default"] {
origServiceKey = k
if v.Name != ctx.Workload.Services[0].Name {
t.Errorf("expected Name=%q != %q", ctx.Workload.Services[0].Name, v.Name)
@@ -386,10 +387,10 @@ func TestConsul_ChangeChecks(t *testing.T) {
}
}
- if n := len(ctx.FakeConsul.checks); n != 1 {
- t.Fatalf("expected 1 check but found %d:\n%#v", n, ctx.FakeConsul.checks)
+ if n := len(ctx.FakeConsul.checks["default"]); n != 1 {
+ t.Fatalf("expected 1 check but found %d:\n%#v", n, ctx.FakeConsul.checks["default"])
}
- for _, v := range ctx.FakeConsul.checks {
+ for _, v := range ctx.FakeConsul.checks["default"] {
if v.Name != "c1" {
t.Fatalf("expected check c1 but found %q", v.Name)
}
@@ -444,19 +445,19 @@ func TestConsul_ChangeChecks(t *testing.T) {
t.Fatalf("unexpected error syncing task: %v", err)
}
- if n := len(ctx.FakeConsul.services); n != 1 {
- t.Fatalf("expected 1 service but found %d:\n%#v", n, ctx.FakeConsul.services)
+ if n := len(ctx.FakeConsul.services["default"]); n != 1 {
+ t.Fatalf("expected 1 service but found %d:\n%#v", n, ctx.FakeConsul.services["default"])
}
- if _, ok := ctx.FakeConsul.services[origServiceKey]; !ok {
+ if _, ok := ctx.FakeConsul.services["default"][origServiceKey]; !ok {
t.Errorf("unexpected key change; was: %q -- but found %#v", origServiceKey, ctx.FakeConsul.services)
}
- if n := len(ctx.FakeConsul.checks); n != 2 {
- t.Fatalf("expected 2 check but found %d:\n%#v", n, ctx.FakeConsul.checks)
+ if n := len(ctx.FakeConsul.checks["default"]); n != 2 {
+ t.Fatalf("expected 2 check but found %d:\n%#v", n, ctx.FakeConsul.checks["default"])
}
- for k, v := range ctx.FakeConsul.checks {
+ for k, v := range ctx.FakeConsul.checks["default"] {
switch v.Name {
case "c1":
if expected := fmt.Sprintf(":%d", xPort); v.TCP != expected {
@@ -538,11 +539,11 @@ func TestConsul_ChangeChecks(t *testing.T) {
t.Fatalf("unexpected error syncing task: %v", err)
}
- if n := len(ctx.FakeConsul.checks); n != 2 {
- t.Fatalf("expected 2 check but found %d:\n%#v", n, ctx.FakeConsul.checks)
+ if n := len(ctx.FakeConsul.checks["default"]); n != 2 {
+ t.Fatalf("expected 2 check but found %d:\n%#v", n, ctx.FakeConsul.checks["default"])
}
- for k, v := range ctx.FakeConsul.checks {
+ for k, v := range ctx.FakeConsul.checks["default"] {
if v.Name == "c1" {
if k != c1ID {
t.Errorf("expected c1 to still have id %q but found %q", c1ID, k)
@@ -582,11 +583,11 @@ func TestConsul_RegServices(t *testing.T) {
t.Fatalf("unexpected error syncing task: %v", err)
}
- if n := len(ctx.FakeConsul.services); n != 1 {
- t.Fatalf("expected 1 service but found %d:\n%#v", n, ctx.FakeConsul.services)
+ if n := len(ctx.FakeConsul.services["default"]); n != 1 {
+ t.Fatalf("expected 1 service but found %d:\n%#v", n, ctx.FakeConsul.services["default"])
}
- for _, v := range ctx.FakeConsul.services {
+ for _, v := range ctx.FakeConsul.services["default"] {
if v.Name != ctx.Workload.Services[0].Name {
t.Errorf("expected Name=%q != %q", ctx.Workload.Services[0].Name, v.Name)
}
@@ -632,10 +633,10 @@ func TestConsul_RegServices(t *testing.T) {
// Make sure changes don't take affect until sync() is called (since
// Run() isn't running)
- if n := len(ctx.FakeConsul.services); n != 1 {
- t.Fatalf("expected 1 service but found %d:\n%#v", n, ctx.FakeConsul.services)
+ if n := len(ctx.FakeConsul.services["default"]); n != 1 {
+ t.Fatalf("expected 1 service but found %d:\n%#v", n, ctx.FakeConsul.services["default"])
}
- for _, v := range ctx.FakeConsul.services {
+ for _, v := range ctx.FakeConsul.services["default"] {
if reflect.DeepEqual(v.Tags, ctx.Workload.Services[0].Tags) {
t.Errorf("expected Tags to differ, changes applied before sync()")
}
@@ -645,11 +646,11 @@ func TestConsul_RegServices(t *testing.T) {
if err := ctx.syncOnce(syncNewOps); err != nil {
t.Fatalf("unexpected error syncing task: %v", err)
}
- if n := len(ctx.FakeConsul.services); n != 2 {
- t.Fatalf("expected 2 services but found %d:\n%#v", n, ctx.FakeConsul.services)
+ if n := len(ctx.FakeConsul.services["default"]); n != 2 {
+ t.Fatalf("expected 2 services but found %d:\n%#v", n, ctx.FakeConsul.services["default"])
}
found := false
- for _, v := range ctx.FakeConsul.services {
+ for _, v := range ctx.FakeConsul.services["default"] {
if v.Name == ctx.Workload.Services[0].Name {
if found {
t.Fatalf("found new service name %q twice", v.Name)
@@ -669,10 +670,10 @@ func TestConsul_RegServices(t *testing.T) {
if err := ctx.syncOnce(syncNewOps); err != nil {
t.Fatalf("unexpected error syncing task: %v", err)
}
- if n := len(ctx.FakeConsul.services); n != 1 {
- t.Fatalf("expected 1 service but found %d:\n%#v", n, ctx.FakeConsul.services)
+ if n := len(ctx.FakeConsul.services["default"]); n != 1 {
+ t.Fatalf("expected 1 service but found %d:\n%#v", n, ctx.FakeConsul.services["default"])
}
- for _, v := range ctx.FakeConsul.services {
+ for _, v := range ctx.FakeConsul.services["default"] {
if v.Name != "taskname-service" {
t.Errorf("expected original task to survive not %q", v.Name)
}
@@ -721,15 +722,14 @@ func TestConsul_ShutdownOK(t *testing.T) {
require.Eventually(ctx.ServiceClient.hasSeen, time.Second, 10*time.Millisecond)
// assert successful registration
- require.Len(ctx.FakeConsul.services, 1, "expected agent service to be registered")
- require.Len(ctx.FakeConsul.checks, 1, "expected agent check to be registered")
- require.Contains(ctx.FakeConsul.services,
- makeAgentServiceID("client", agentServices[0]))
+ require.Len(ctx.FakeConsul.services["default"], 1, "expected agent service to be registered")
+ require.Len(ctx.FakeConsul.checks["default"], 1, "expected agent check to be registered")
+ require.Contains(ctx.FakeConsul.services["default"], makeAgentServiceID("client", agentServices[0]))
// Shutdown() should block until Nomad agent service/check is deregistered
require.NoError(ctx.ServiceClient.Shutdown())
- require.Len(ctx.FakeConsul.services, 0, "expected agent service to be deregistered")
- require.Len(ctx.FakeConsul.checks, 0, "expected agent check to be deregistered")
+ require.Len(ctx.FakeConsul.services["default"], 0, "expected agent service to be deregistered")
+ require.Len(ctx.FakeConsul.checks["default"], 0, "expected agent check to be deregistered")
}
// TestConsul_ShutdownBlocked tests the blocked past deadline path for the
@@ -763,8 +763,8 @@ func TestConsul_ShutdownBlocked(t *testing.T) {
}
require.NoError(ctx.ServiceClient.RegisterAgent("client", agentServices))
require.Eventually(ctx.ServiceClient.hasSeen, time.Second, 10*time.Millisecond)
- require.Len(ctx.FakeConsul.services, 1, "expected agent service to be registered")
- require.Len(ctx.FakeConsul.checks, 1, "expected agent check to be registered")
+ require.Len(ctx.FakeConsul.services["default"], 1, "expected agent service to be registered")
+ require.Len(ctx.FakeConsul.checks["default"], 1, "expected agent check to be registered")
// prevent normal shutdown by blocking Consul. the shutdown should wait
// until agent deregistration has finished
@@ -792,9 +792,9 @@ func TestConsul_ShutdownBlocked(t *testing.T) {
"expected shutdown to take >200ms and <1s")
require.Greater(shutdownTime, 200*time.Millisecond.Seconds(),
"expected shutdown to take >200ms and <1s")
- require.Len(ctx.FakeConsul.services, 0,
+ require.Len(ctx.FakeConsul.services["default"], 0,
"expected agent service to be deregistered")
- require.Len(ctx.FakeConsul.checks, 0,
+ require.Len(ctx.FakeConsul.checks["default"], 0,
"expected agent check to be deregistered")
}
@@ -863,11 +863,11 @@ func TestConsul_DriverNetwork_AutoUse(t *testing.T) {
t.Fatalf("unexpected error syncing task: %v", err)
}
- if n := len(ctx.FakeConsul.services); n != 3 {
+ if n := len(ctx.FakeConsul.services["default"]); n != 3 {
t.Fatalf("expected 2 services but found: %d", n)
}
- for _, v := range ctx.FakeConsul.services {
+ for _, v := range ctx.FakeConsul.services["default"] {
switch v.Name {
case ctx.Workload.Services[0].Name: // x
// Since DriverNetwork.AutoAdvertise=true, driver ports should be used
@@ -967,11 +967,11 @@ func TestConsul_DriverNetwork_NoAutoUse(t *testing.T) {
t.Fatalf("unexpected error syncing task: %v", err)
}
- if n := len(ctx.FakeConsul.services); n != 3 {
+ if n := len(ctx.FakeConsul.services["default"]); n != 3 {
t.Fatalf("expected 3 services but found: %d", n)
}
- for _, v := range ctx.FakeConsul.services {
+ for _, v := range ctx.FakeConsul.services["default"] {
switch v.Name {
case ctx.Workload.Services[0].Name: // x + auto
// Since DriverNetwork.AutoAdvertise=false, host ports should be used
@@ -1028,11 +1028,11 @@ func TestConsul_DriverNetwork_Change(t *testing.T) {
t.Fatalf("unexpected error syncing task: %v", err)
}
- if n := len(ctx.FakeConsul.services); n != 1 {
+ if n := len(ctx.FakeConsul.services["default"]); n != 1 {
t.Fatalf("expected 1 service but found: %d", n)
}
- for _, v := range ctx.FakeConsul.services {
+ for _, v := range ctx.FakeConsul.services["default"] {
switch v.Name {
case ctx.Workload.Services[0].Name:
if v.Port != port {
@@ -1085,8 +1085,8 @@ func TestConsul_CanaryTags(t *testing.T) {
require.NoError(ctx.ServiceClient.RegisterWorkload(ctx.Workload))
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 1)
- for _, service := range ctx.FakeConsul.services {
+ require.Len(ctx.FakeConsul.services["default"], 1)
+ for _, service := range ctx.FakeConsul.services["default"] {
require.Equal(canaryTags, service.Tags)
}
@@ -1095,14 +1095,14 @@ func TestConsul_CanaryTags(t *testing.T) {
ctx.Workload.Canary = false
require.NoError(ctx.ServiceClient.UpdateWorkload(origWorkload, ctx.Workload))
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 1)
- for _, service := range ctx.FakeConsul.services {
+ require.Len(ctx.FakeConsul.services["default"], 1)
+ for _, service := range ctx.FakeConsul.services["default"] {
require.NotEqual(canaryTags, service.Tags)
}
ctx.ServiceClient.RemoveWorkload(ctx.Workload)
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 0)
+ require.Len(ctx.FakeConsul.services["default"], 0)
}
// TestConsul_CanaryTags_NoTags asserts Tags are used when Canary=true and there
@@ -1118,8 +1118,8 @@ func TestConsul_CanaryTags_NoTags(t *testing.T) {
require.NoError(ctx.ServiceClient.RegisterWorkload(ctx.Workload))
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 1)
- for _, service := range ctx.FakeConsul.services {
+ require.Len(ctx.FakeConsul.services["default"], 1)
+ for _, service := range ctx.FakeConsul.services["default"] {
require.Equal(tags, service.Tags)
}
@@ -1128,14 +1128,14 @@ func TestConsul_CanaryTags_NoTags(t *testing.T) {
ctx.Workload.Canary = false
require.NoError(ctx.ServiceClient.UpdateWorkload(origWorkload, ctx.Workload))
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 1)
- for _, service := range ctx.FakeConsul.services {
+ require.Len(ctx.FakeConsul.services["default"], 1)
+ for _, service := range ctx.FakeConsul.services["default"] {
require.Equal(tags, service.Tags)
}
ctx.ServiceClient.RemoveWorkload(ctx.Workload)
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 0)
+ require.Len(ctx.FakeConsul.services["default"], 0)
}
// TestConsul_CanaryMeta asserts CanaryMeta are used when Canary=true
@@ -1151,8 +1151,8 @@ func TestConsul_CanaryMeta(t *testing.T) {
require.NoError(ctx.ServiceClient.RegisterWorkload(ctx.Workload))
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 1)
- for _, service := range ctx.FakeConsul.services {
+ require.Len(ctx.FakeConsul.services["default"], 1)
+ for _, service := range ctx.FakeConsul.services["default"] {
require.Equal(canaryMeta, service.Meta)
}
@@ -1161,14 +1161,14 @@ func TestConsul_CanaryMeta(t *testing.T) {
ctx.Workload.Canary = false
require.NoError(ctx.ServiceClient.UpdateWorkload(origWorkload, ctx.Workload))
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 1)
- for _, service := range ctx.FakeConsul.services {
+ require.Len(ctx.FakeConsul.services["default"], 1)
+ for _, service := range ctx.FakeConsul.services["default"] {
require.NotEqual(canaryMeta, service.Meta)
}
ctx.ServiceClient.RemoveWorkload(ctx.Workload)
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 0)
+ require.Len(ctx.FakeConsul.services["default"], 0)
}
// TestConsul_CanaryMeta_NoMeta asserts Meta are used when Canary=true and there
@@ -1185,8 +1185,8 @@ func TestConsul_CanaryMeta_NoMeta(t *testing.T) {
require.NoError(ctx.ServiceClient.RegisterWorkload(ctx.Workload))
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 1)
- for _, service := range ctx.FakeConsul.services {
+ require.Len(ctx.FakeConsul.services["default"], 1)
+ for _, service := range ctx.FakeConsul.services["default"] {
require.Equal(meta, service.Meta)
}
@@ -1195,14 +1195,14 @@ func TestConsul_CanaryMeta_NoMeta(t *testing.T) {
ctx.Workload.Canary = false
require.NoError(ctx.ServiceClient.UpdateWorkload(origWorkload, ctx.Workload))
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 1)
- for _, service := range ctx.FakeConsul.services {
+ require.Len(ctx.FakeConsul.services["default"], 1)
+ for _, service := range ctx.FakeConsul.services["default"] {
require.Equal(meta, service.Meta)
}
ctx.ServiceClient.RemoveWorkload(ctx.Workload)
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 0)
+ require.Len(ctx.FakeConsul.services["default"], 0)
}
// TestConsul_PeriodicSync asserts that Nomad periodically reconciles with
@@ -1284,8 +1284,10 @@ func TestCreateCheckReg_HTTP(t *testing.T) {
checkID := check.Hash(serviceID)
host := "localhost"
port := 41111
+ namespace := ""
expected := &api.AgentCheckRegistration{
+ Namespace: namespace,
ID: checkID,
Name: "name",
ServiceID: serviceID,
@@ -1300,7 +1302,7 @@ func TestCreateCheckReg_HTTP(t *testing.T) {
},
}
- actual, err := createCheckReg(serviceID, checkID, check, host, port)
+ actual, err := createCheckReg(serviceID, checkID, check, host, port, namespace)
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1329,6 +1331,7 @@ func TestCreateCheckReg_GRPC(t *testing.T) {
checkID := check.Hash(serviceID)
expected := &api.AgentCheckRegistration{
+ Namespace: "",
ID: checkID,
Name: "name",
ServiceID: serviceID,
@@ -1341,7 +1344,7 @@ func TestCreateCheckReg_GRPC(t *testing.T) {
},
}
- actual, err := createCheckReg(serviceID, checkID, check, "localhost", 8080)
+ actual, err := createCheckReg(serviceID, checkID, check, "localhost", 8080, "default")
require.NoError(t, err)
require.Equal(t, expected, actual)
}
@@ -1747,9 +1750,9 @@ func TestConsul_ServiceName_Duplicates(t *testing.T) {
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 3)
+ require.Len(ctx.FakeConsul.services["default"], 3)
- for _, v := range ctx.FakeConsul.services {
+ for _, v := range ctx.FakeConsul.services["default"] {
if v.Name == ctx.Workload.Services[0].Name && v.Port == xPort {
require.ElementsMatch(v.Tags, ctx.Workload.Services[0].Tags)
require.Len(v.Checks, 1)
@@ -1815,8 +1818,8 @@ func TestConsul_ServiceDeregistration_OutProbation(t *testing.T) {
require.NoError(ctx.ServiceClient.RegisterWorkload(explicitlyRemovedWorkload))
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 2)
- require.Len(ctx.FakeConsul.checks, 2)
+ require.Len(ctx.FakeConsul.services["default"], 2)
+ require.Len(ctx.FakeConsul.checks["default"], 2)
// we register a task through nomad API then remove it out of band
outofbandWorkload := testWorkload()
@@ -1840,7 +1843,7 @@ func TestConsul_ServiceDeregistration_OutProbation(t *testing.T) {
require.NoError(ctx.ServiceClient.RegisterWorkload(outofbandWorkload))
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 3)
+ require.Len(ctx.FakeConsul.services["default"], 3)
// remove outofbandWorkload from local services so it appears unknown to client
require.Len(ctx.ServiceClient.services, 3)
@@ -1857,16 +1860,16 @@ func TestConsul_ServiceDeregistration_OutProbation(t *testing.T) {
ctx.ServiceClient.RemoveWorkload(explicitlyRemovedWorkload)
require.NoError(ctx.syncOnce(syncNewOps))
require.NoError(ctx.ServiceClient.sync(syncNewOps))
- require.Len(ctx.FakeConsul.services, 1)
- require.Len(ctx.FakeConsul.checks, 1)
+ require.Len(ctx.FakeConsul.services["default"], 1)
+ require.Len(ctx.FakeConsul.checks["default"], 1)
- require.Contains(ctx.FakeConsul.services, remainingWorkloadServiceID)
- require.NotContains(ctx.FakeConsul.services, outofbandWorkloadServiceID)
- require.NotContains(ctx.FakeConsul.services, explicitlyRemovedWorkloadServiceID)
+ require.Contains(ctx.FakeConsul.services["default"], remainingWorkloadServiceID)
+ require.NotContains(ctx.FakeConsul.services["default"], outofbandWorkloadServiceID)
+ require.NotContains(ctx.FakeConsul.services["default"], explicitlyRemovedWorkloadServiceID)
- require.Contains(ctx.FakeConsul.checks, MakeCheckID(remainingWorkloadServiceID, remainingWorkload.Services[0].Checks[0]))
- require.NotContains(ctx.FakeConsul.checks, MakeCheckID(outofbandWorkloadServiceID, outofbandWorkload.Services[0].Checks[0]))
- require.NotContains(ctx.FakeConsul.checks, MakeCheckID(explicitlyRemovedWorkloadServiceID, explicitlyRemovedWorkload.Services[0].Checks[0]))
+ require.Contains(ctx.FakeConsul.checks["default"], MakeCheckID(remainingWorkloadServiceID, remainingWorkload.Services[0].Checks[0]))
+ require.NotContains(ctx.FakeConsul.checks["default"], MakeCheckID(outofbandWorkloadServiceID, outofbandWorkload.Services[0].Checks[0]))
+ require.NotContains(ctx.FakeConsul.checks["default"], MakeCheckID(explicitlyRemovedWorkloadServiceID, explicitlyRemovedWorkload.Services[0].Checks[0]))
}
// TestConsul_ServiceDeregistration_InProbation asserts that during initialization
@@ -1924,8 +1927,8 @@ func TestConsul_ServiceDeregistration_InProbation(t *testing.T) {
require.NoError(ctx.ServiceClient.RegisterWorkload(explicitlyRemovedWorkload))
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 2)
- require.Len(ctx.FakeConsul.checks, 2)
+ require.Len(ctx.FakeConsul.services["default"], 2)
+ require.Len(ctx.FakeConsul.checks["default"], 2)
// we register a task through nomad API then remove it out of band
outofbandWorkload := testWorkload()
@@ -1949,7 +1952,7 @@ func TestConsul_ServiceDeregistration_InProbation(t *testing.T) {
require.NoError(ctx.ServiceClient.RegisterWorkload(outofbandWorkload))
require.NoError(ctx.syncOnce(syncNewOps))
- require.Len(ctx.FakeConsul.services, 3)
+ require.Len(ctx.FakeConsul.services["default"], 3)
// remove outofbandWorkload from local services so it appears unknown to client
require.Len(ctx.ServiceClient.services, 3)
@@ -1966,30 +1969,29 @@ func TestConsul_ServiceDeregistration_InProbation(t *testing.T) {
ctx.ServiceClient.RemoveWorkload(explicitlyRemovedWorkload)
require.NoError(ctx.syncOnce(syncNewOps))
require.NoError(ctx.ServiceClient.sync(syncNewOps))
- require.Len(ctx.FakeConsul.services, 2)
- require.Len(ctx.FakeConsul.checks, 2)
+ require.Len(ctx.FakeConsul.services["default"], 2)
+ require.Len(ctx.FakeConsul.checks["default"], 2)
- require.Contains(ctx.FakeConsul.services, remainingWorkloadServiceID)
- require.Contains(ctx.FakeConsul.services, outofbandWorkloadServiceID)
- require.NotContains(ctx.FakeConsul.services, explicitlyRemovedWorkloadServiceID)
+ require.Contains(ctx.FakeConsul.services["default"], remainingWorkloadServiceID)
+ require.Contains(ctx.FakeConsul.services["default"], outofbandWorkloadServiceID)
+ require.NotContains(ctx.FakeConsul.services["default"], explicitlyRemovedWorkloadServiceID)
- require.Contains(ctx.FakeConsul.checks, MakeCheckID(remainingWorkloadServiceID, remainingWorkload.Services[0].Checks[0]))
- require.Contains(ctx.FakeConsul.checks, MakeCheckID(outofbandWorkloadServiceID, outofbandWorkload.Services[0].Checks[0]))
- require.NotContains(ctx.FakeConsul.checks, MakeCheckID(explicitlyRemovedWorkloadServiceID, explicitlyRemovedWorkload.Services[0].Checks[0]))
+ require.Contains(ctx.FakeConsul.checks["default"], MakeCheckID(remainingWorkloadServiceID, remainingWorkload.Services[0].Checks[0]))
+ require.Contains(ctx.FakeConsul.checks["default"], MakeCheckID(outofbandWorkloadServiceID, outofbandWorkload.Services[0].Checks[0]))
+ require.NotContains(ctx.FakeConsul.checks["default"], MakeCheckID(explicitlyRemovedWorkloadServiceID, explicitlyRemovedWorkload.Services[0].Checks[0]))
// after probation, outofband services and checks are removed
ctx.ServiceClient.deregisterProbationExpiry = time.Now().Add(-1 * time.Hour)
require.NoError(ctx.ServiceClient.sync(syncNewOps))
- require.Len(ctx.FakeConsul.services, 1)
- require.Len(ctx.FakeConsul.checks, 1)
+ require.Len(ctx.FakeConsul.services["default"], 1)
+ require.Len(ctx.FakeConsul.checks["default"], 1)
- require.Contains(ctx.FakeConsul.services, remainingWorkloadServiceID)
- require.NotContains(ctx.FakeConsul.services, outofbandWorkloadServiceID)
- require.NotContains(ctx.FakeConsul.services, explicitlyRemovedWorkloadServiceID)
-
- require.Contains(ctx.FakeConsul.checks, MakeCheckID(remainingWorkloadServiceID, remainingWorkload.Services[0].Checks[0]))
- require.NotContains(ctx.FakeConsul.checks, MakeCheckID(outofbandWorkloadServiceID, outofbandWorkload.Services[0].Checks[0]))
- require.NotContains(ctx.FakeConsul.checks, MakeCheckID(explicitlyRemovedWorkloadServiceID, explicitlyRemovedWorkload.Services[0].Checks[0]))
+ require.Contains(ctx.FakeConsul.services["default"], remainingWorkloadServiceID)
+ require.NotContains(ctx.FakeConsul.services["default"], outofbandWorkloadServiceID)
+ require.NotContains(ctx.FakeConsul.services["default"], explicitlyRemovedWorkloadServiceID)
+ require.Contains(ctx.FakeConsul.checks["default"], MakeCheckID(remainingWorkloadServiceID, remainingWorkload.Services[0].Checks[0]))
+ require.NotContains(ctx.FakeConsul.checks["default"], MakeCheckID(outofbandWorkloadServiceID, outofbandWorkload.Services[0].Checks[0]))
+ require.NotContains(ctx.FakeConsul.checks["default"], MakeCheckID(explicitlyRemovedWorkloadServiceID, explicitlyRemovedWorkload.Services[0].Checks[0]))
}
diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go
index 3b358c447..a4a5c57b3 100644
--- a/command/agent/job_endpoint.go
+++ b/command/agent/job_endpoint.go
@@ -7,7 +7,6 @@ import (
"strings"
"github.com/golang/snappy"
-
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/jobspec"
@@ -877,6 +876,7 @@ func ApiTgToStructsTG(job *structs.Job, taskGroup *api.TaskGroup, tg *structs.Ta
tg.Affinities = ApiAffinitiesToStructs(taskGroup.Affinities)
tg.Networks = ApiNetworkResourceToStructs(taskGroup.Networks)
tg.Services = ApiServicesToStructs(taskGroup.Services)
+ tg.Consul = apiConsulToStructs(taskGroup.Consul)
tg.RestartPolicy = &structs.RestartPolicy{
Attempts: *taskGroup.RestartPolicy.Attempts,
@@ -1575,6 +1575,15 @@ func apiConnectSidecarTaskToStructs(in *api.SidecarTask) *structs.SidecarTask {
}
}
+func apiConsulToStructs(in *api.Consul) *structs.Consul {
+ if in == nil {
+ return nil
+ }
+ return &structs.Consul{
+ Namespace: in.Namespace,
+ }
+}
+
func apiLogConfigToStructs(in *api.LogConfig) *structs.LogConfig {
if in == nil {
return nil
diff --git a/command/agent/job_endpoint_test.go b/command/agent/job_endpoint_test.go
index 099eb2a60..650c32bb5 100644
--- a/command/agent/job_endpoint_test.go
+++ b/command/agent/job_endpoint_test.go
@@ -1928,6 +1928,9 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
Meta: map[string]string{
"key": "value",
},
+ Consul: &api.Consul{
+ Namespace: "team-foo",
+ },
Services: []*api.Service{
{
Name: "groupserviceA",
@@ -2304,6 +2307,9 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
Meta: map[string]string{
"key": "value",
},
+ Consul: &structs.Consul{
+ Namespace: "team-foo",
+ },
Services: []*structs.Service{
{
Name: "groupserviceA",
@@ -2581,6 +2587,9 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
Meta: map[string]string{
"key": "value",
},
+ Consul: &api.Consul{
+ Namespace: "foo",
+ },
Tasks: []*api.Task{
{
Name: "task1",
@@ -2698,6 +2707,9 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
Meta: map[string]string{
"key": "value",
},
+ Consul: &structs.Consul{
+ Namespace: "foo",
+ },
Tasks: []*structs.Task{
{
Name: "task1",
diff --git a/command/job_init.bindata_assetfs.go b/command/job_init.bindata_assetfs.go
index 8b0c01d37..9677c0860 100644
--- a/command/job_init.bindata_assetfs.go
+++ b/command/job_init.bindata_assetfs.go
@@ -87,7 +87,7 @@ func commandAssetsConnectShortNomad() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "command/assets/connect-short.nomad", size: 997, mode: os.FileMode(436), modTime: time.Unix(1612560436, 0)}
+ info := bindataFileInfo{name: "command/assets/connect-short.nomad", size: 997, mode: os.FileMode(436), modTime: time.Unix(1616684356, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -107,7 +107,7 @@ func commandAssetsConnectNomad() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "command/assets/connect.nomad", size: 17842, mode: os.FileMode(436), modTime: time.Unix(1612560436, 0)}
+ info := bindataFileInfo{name: "command/assets/connect.nomad", size: 17842, mode: os.FileMode(436), modTime: time.Unix(1616684356, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -127,7 +127,7 @@ func commandAssetsExampleShortNomad() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "command/assets/example-short.nomad", size: 324, mode: os.FileMode(436), modTime: time.Unix(1612560436, 0)}
+ info := bindataFileInfo{name: "command/assets/example-short.nomad", size: 324, mode: os.FileMode(436), modTime: time.Unix(1616684356, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -147,7 +147,7 @@ func commandAssetsExampleNomad() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "command/assets/example.nomad", size: 16057, mode: os.FileMode(436), modTime: time.Unix(1612560436, 0)}
+ info := bindataFileInfo{name: "command/assets/example.nomad", size: 16057, mode: os.FileMode(436), modTime: time.Unix(1616684356, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
diff --git a/command/job_run.go b/command/job_run.go
index ba683dcaf..77bd4b2b3 100644
--- a/command/job_run.go
+++ b/command/job_run.go
@@ -157,7 +157,7 @@ func (c *JobRunCommand) Name() string { return "job run" }
func (c *JobRunCommand) Run(args []string) int {
var detach, verbose, output, override, preserveCounts bool
- var checkIndexStr, consulToken, vaultToken, vaultNamespace string
+ var checkIndexStr, consulToken, consulNamespace, vaultToken, vaultNamespace string
var varArgs, varFiles flaghelper.StringFlag
flagSet := c.Meta.FlagSet(c.Name(), FlagSetClient)
@@ -170,6 +170,7 @@ func (c *JobRunCommand) Run(args []string) int {
flagSet.BoolVar(&c.JobGetter.hcl1, "hcl1", false, "")
flagSet.StringVar(&checkIndexStr, "check-index", "", "")
flagSet.StringVar(&consulToken, "consul-token", "", "")
+ flagSet.StringVar(&consulNamespace, "consul-namespace", "", "")
flagSet.StringVar(&vaultToken, "vault-token", "", "")
flagSet.StringVar(&vaultNamespace, "vault-namespace", "", "")
flagSet.Var(&varArgs, "var", "")
@@ -232,6 +233,10 @@ func (c *JobRunCommand) Run(args []string) int {
job.ConsulToken = helper.StringToPtr(consulToken)
}
+ if consulNamespace != "" {
+ job.ConsulNamespace = helper.StringToPtr(consulNamespace)
+ }
+
// Parse the Vault token
if vaultToken == "" {
// Check the environment variable
diff --git a/go.mod b/go.mod
index d1fe7183e..109fd7388 100644
--- a/go.mod
+++ b/go.mod
@@ -8,6 +8,8 @@ replace (
github.com/apparentlymart/go-textseg/v12 => github.com/apparentlymart/go-textseg/v12 v12.0.0
github.com/godbus/dbus => github.com/godbus/dbus v5.0.1+incompatible
github.com/golang/protobuf => github.com/golang/protobuf v1.3.4
+
+ github.com/hashicorp/consul/api => github.com/hashicorp/consul/api v1.4.1-0.20210319180826-cd1cd4febd26
github.com/hashicorp/go-discover => github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f
github.com/hashicorp/hcl => github.com/hashicorp/hcl v1.0.1-0.20201016140508-a07e7d50bbee
github.com/hashicorp/nomad/api => ./api
diff --git a/go.sum b/go.sum
index c311c23a3..fcee0ca80 100644
--- a/go.sum
+++ b/go.sum
@@ -331,9 +331,8 @@ github.com/hashicorp/consul v1.7.8 h1:hp308KxAf3zWoGuwp2e+0UUhrm6qHjeBQk3jCZ+bjc
github.com/hashicorp/consul v1.7.8/go.mod h1:urbfGaVZDmnXC6geg0LYPh/SRUk1E8nfmDHpz+Q0nLw=
github.com/hashicorp/consul-template v0.25.1 h1:+D2s8eyRqWyX7GPNxeUi8tsyh8pRn3J6k8giEchPfKQ=
github.com/hashicorp/consul-template v0.25.1/go.mod h1:/vUsrJvDuuQHcxEw0zik+YXTS7ZKWZjQeaQhshBmfH0=
-github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU=
-github.com/hashicorp/consul/api v1.8.1 h1:BOEQaMWoGMhmQ29fC26bi0qb7/rId9JzZP2V0Xmx7m8=
-github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk=
+github.com/hashicorp/consul/api v1.4.1-0.20210319180826-cd1cd4febd26 h1:spd0xeuuu98mp2iMldM4Y5v0D6oc5yPHpvovzbwpDcU=
+github.com/hashicorp/consul/api v1.4.1-0.20210319180826-cd1cd4febd26/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk=
github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
github.com/hashicorp/consul/sdk v0.7.0 h1:H6R9d008jDcHPQPAqPNuydAshJ4v5/8URdFnUvK/+sc=
github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
@@ -434,7 +433,6 @@ github.com/hashicorp/raft v1.1.3-0.20200211192230-365023de17e6 h1:+H0NF++gFCFqQQ
github.com/hashicorp/raft v1.1.3-0.20200211192230-365023de17e6/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4=
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k=
github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM=
@@ -538,11 +536,9 @@ github.com/mitchellh/go-testing-interface v1.0.3/go.mod h1:kRemZodwjscx+RGhAo8eI
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y=
github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.2.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
diff --git a/nomad/consul.go b/nomad/consul.go
index c3d0a7d26..f9126fd97 100644
--- a/nomad/consul.go
+++ b/nomad/consul.go
@@ -58,13 +58,22 @@ const (
// service "web" { policy = "write" }
// service_prefix "" { policy = "write" }
ConsulPolicyWrite = "write"
+
+ // ConsulPolicyRead is the literal text of the policy field of a Consul Policy
+ // Rule that we check when validating a job-submitter Consul token against the
+ // necessary permissions for reading the key-value store.
+ //
+ // The only acceptable rule is
+ // - service_prefix "" { policy = "read|write" }
+ ConsulPolicyRead = "read"
)
type ServiceIdentityRequest struct {
- TaskKind structs.TaskKind
- TaskName string
- ClusterID string
- AllocID string
+ ConsulNamespace string
+ TaskKind structs.TaskKind
+ TaskName string
+ ClusterID string
+ AllocID string
}
func (sir ServiceIdentityRequest) Validate() error {
@@ -92,10 +101,10 @@ func (sir ServiceIdentityRequest) Description() string {
// ACL requirements
// - acl:write (transitive through ACLsAPI)
type ConsulACLsAPI interface {
-
- // CheckSIPolicy checks that the given operator token has the equivalent ACL
- // permissiveness that a Service Identity token policy for task would have.
- CheckSIPolicy(ctx context.Context, task, secretID string) error
+ // CheckPermissions checks that the given Consul token has the necessary ACL
+ // permissions for each way that Consul is used as indicated by usage,
+ // returning an error if not.
+ CheckPermissions(ctx context.Context, namespace string, usage *structs.ConsulUsage, secretID string) error
// Create instructs Consul to create a Service Identity token.
CreateToken(context.Context, ServiceIdentityRequest) (*structs.SIToken, error)
@@ -109,10 +118,6 @@ type ConsulACLsAPI interface {
// Stop is used to stop background token revocations. Intended to be used
// on Nomad Server shutdown.
Stop()
-
- // todo(shoenig): use list endpoint for finding orphaned tokens
- // ListTokens lists every token in Consul.
- // ListTokens() ([]string, error)
}
// PurgeSITokenAccessorFunc is called to remove SI Token accessors from the
@@ -181,32 +186,82 @@ func (c *consulACLsAPI) Stop() {
c.bgRevokeStopped = true
}
-func (c *consulACLsAPI) CheckSIPolicy(ctx context.Context, task, secretID string) error {
- defer metrics.MeasureSince([]string{"nomad", "consul", "check_si_policy"}, time.Now())
+func (c *consulACLsAPI) readToken(ctx context.Context, secretID string) (*api.ACLToken, error) {
+ defer metrics.MeasureSince([]string{"nomad", "consul", "read_token"}, time.Now())
- if id := strings.TrimSpace(secretID); id == "" {
- return errors.New("missing consul token")
+ if id := strings.TrimSpace(secretID); !helper.IsUUID(id) {
+ return nil, errors.New("missing consul token")
}
// Ensure we are under our rate limit.
if err := c.limiter.Wait(ctx); err != nil {
- return err
+ return nil, errors.Wrap(err, "unable to read consul token")
}
- opToken, _, err := c.aclClient.TokenReadSelf(&api.QueryOptions{
+ consulToken, _, err := c.aclClient.TokenReadSelf(&api.QueryOptions{
AllowStale: false,
Token: secretID,
})
if err != nil {
- return errors.Wrap(err, "unable to validate operator consul token")
+ return nil, errors.Wrap(err, "unable to read consul token")
}
- allowable, err := c.hasSufficientPolicy(task, opToken)
- if err != nil {
- return errors.Wrap(err, "unable to validate operator consul token")
+ return consulToken, nil
+}
+
+func (c *consulACLsAPI) CheckPermissions(ctx context.Context, namespace string, usage *structs.ConsulUsage, secretID string) error {
+ // consul not used, nothing to check
+ if !usage.Used() {
+ return nil
}
- if !allowable {
- return errors.Errorf("permission denied for %q", task)
+
+ // If namespace is not declared on nomad jobs, assume default consul namespace
+ // when comparing with the consul ACL token. This maintains backwards compatibility
+ // with existing connect jobs, which may already be authorized with Consul tokens.
+ if namespace == "" {
+ namespace = "default"
+ }
+
+ // lookup the token from consul
+ token, err := c.readToken(ctx, secretID)
+ if err != nil {
+ return err
+ }
+
+ // verify the token namespace matches namespace in job
+ if token.Namespace != namespace {
+ return errors.Errorf("consul ACL token cannot use namespace %q", namespace)
+ }
+
+ // verify token has keystore read permission, if using template
+ if usage.KV {
+ allowable, err := c.canReadKeystore(token)
+ if err != nil {
+ return err
+ } else if !allowable {
+ return errors.New("insufficient Consul ACL permissions to use template")
+ }
+ }
+
+ // verify token has service write permission for group+task services
+ for _, service := range usage.Services {
+ allowable, err := c.canWriteService(service, token)
+ if err != nil {
+ return err
+ } else if !allowable {
+ return errors.Errorf("insufficient Consul ACL permissions to write service %q", service)
+ }
+ }
+
+ // verify token has service identity permission for connect services
+ for _, kind := range usage.Kinds {
+ service := kind.Value()
+ allowable, err := c.canWriteService(service, token)
+ if err != nil {
+ return err
+ } else if !allowable {
+ return errors.Errorf("insufficient Consul ACL permissions to write Connect service %q", service)
+ }
}
return nil
@@ -235,6 +290,7 @@ func (c *consulACLsAPI) CreateToken(ctx context.Context, sir ServiceIdentityRequ
partial := &api.ACLToken{
Description: sir.Description(),
ServiceIdentities: []*api.ACLServiceIdentity{{ServiceName: service}},
+ Namespace: sir.ConsulNamespace,
}
// Ensure we are under our rate limit.
@@ -248,9 +304,10 @@ func (c *consulACLsAPI) CreateToken(ctx context.Context, sir ServiceIdentityRequ
}
return &structs.SIToken{
- TaskName: sir.TaskName,
- AccessorID: token.AccessorID,
- SecretID: token.SecretID,
+ ConsulNamespace: token.Namespace,
+ AccessorID: token.AccessorID,
+ SecretID: token.SecretID,
+ TaskName: sir.TaskName,
}, nil
}
@@ -370,7 +427,7 @@ func (c *consulACLsAPI) singleRevoke(ctx context.Context, accessor *structs.SITo
}
// Consul will no-op the deletion of a non-existent token (no error)
- _, err := c.aclClient.TokenDelete(accessor.AccessorID, nil)
+ _, err := c.aclClient.TokenDelete(accessor.AccessorID, &api.WriteOptions{Namespace: accessor.ConsulNamespace})
return err
}
diff --git a/nomad/consul_policy.go b/nomad/consul_policy.go
index 3630a4df7..b7ba5fecb 100644
--- a/nomad/consul_policy.go
+++ b/nomad/consul_policy.go
@@ -14,20 +14,29 @@ type ConsulServiceRule struct {
Policy string
}
+// ConsulKeyRule represents a policy for the keystore.
+type ConsulKeyRule struct {
+ Name string `hcl:",key"`
+ Policy string
+}
+
// ConsulPolicy represents the parts of a ConsulServiceRule Policy that are
// relevant to Service Identity authorizations.
type ConsulPolicy struct {
Services []*ConsulServiceRule `hcl:"service,expand"`
ServicePrefixes []*ConsulServiceRule `hcl:"service_prefix,expand"`
+ KeyPrefixes []*ConsulKeyRule `hcl:"key_prefix,expand"`
}
-// IsEmpty returns true if there are no Services or ServicePrefixes defined for
-// the ConsulPolicy.
+// IsEmpty returns true if there are no Services, ServicePrefixes, or KeyPrefixes
+// defined for the ConsulPolicy.
func (cp *ConsulPolicy) IsEmpty() bool {
if cp == nil {
return true
}
- return len(cp.Services) == 0 && len(cp.ServicePrefixes) == 0
+
+ policies := len(cp.Services) + len(cp.ServicePrefixes) + len(cp.KeyPrefixes)
+ return policies == 0
}
// ParseConsulPolicy parses raw string s into a ConsulPolicy. An error is
@@ -45,10 +54,10 @@ func ParseConsulPolicy(s string) (*ConsulPolicy, error) {
return cp, nil
}
-func (c *consulACLsAPI) hasSufficientPolicy(task string, token *api.ACLToken) (bool, error) {
+func (c *consulACLsAPI) canReadKeystore(token *api.ACLToken) (bool, error) {
// check each policy directly attached to the token
for _, policyRef := range token.Policies {
- if allowable, err := c.policyAllowsServiceWrite(task, policyRef.ID); err != nil {
+ if allowable, err := c.policyAllowsKeystoreRead(policyRef.ID); err != nil {
return false, err
} else if allowable {
return true, nil
@@ -65,11 +74,10 @@ func (c *consulACLsAPI) hasSufficientPolicy(task string, token *api.ACLToken) (b
}
for _, policyLink := range role.Policies {
- allowable, err := c.policyAllowsServiceWrite(task, policyLink.ID)
+ allowable, err := c.policyAllowsKeystoreRead(policyLink.ID)
if err != nil {
return false, err
- }
- if allowable {
+ } else if allowable {
return true, nil
}
}
@@ -78,7 +86,39 @@ func (c *consulACLsAPI) hasSufficientPolicy(task string, token *api.ACLToken) (b
return false, nil
}
-func (c *consulACLsAPI) policyAllowsServiceWrite(task string, policyID string) (bool, error) {
+func (c *consulACLsAPI) canWriteService(service string, token *api.ACLToken) (bool, error) {
+ // check each policy directly attached to the token
+ for _, policyRef := range token.Policies {
+ if allowable, err := c.policyAllowsServiceWrite(service, policyRef.ID); err != nil {
+ return false, err
+ } else if allowable {
+ return true, nil
+ }
+ }
+
+ // check each policy on each role attached to the token
+ for _, roleLink := range token.Roles {
+ role, _, err := c.aclClient.RoleRead(roleLink.ID, &api.QueryOptions{
+ AllowStale: false,
+ })
+ if err != nil {
+ return false, err
+ }
+
+ for _, policyLink := range role.Policies {
+ allowable, err := c.policyAllowsServiceWrite(service, policyLink.ID)
+ if err != nil {
+ return false, err
+ } else if allowable {
+ return true, nil
+ }
+ }
+ }
+
+ return false, nil
+}
+
+func (c *consulACLsAPI) policyAllowsServiceWrite(service string, policyID string) (bool, error) {
policy, _, err := c.aclClient.PolicyRead(policyID, &api.QueryOptions{
AllowStale: false,
})
@@ -94,7 +134,7 @@ func (c *consulACLsAPI) policyAllowsServiceWrite(task string, policyID string) (
return false, err
}
- if cp.allowsServiceWrite(task) {
+ if cp.allowsServiceWrite(service) {
return true, nil
}
@@ -127,3 +167,36 @@ func (cp *ConsulPolicy) allowsServiceWrite(task string) bool {
}
return false
}
+
+func (c *consulACLsAPI) policyAllowsKeystoreRead(policyID string) (bool, error) {
+ policy, _, err := c.aclClient.PolicyRead(policyID, &api.QueryOptions{
+ AllowStale: false,
+ })
+ if err != nil {
+ return false, err
+ }
+
+ cp, err := ParseConsulPolicy(policy.Rules)
+ if err != nil {
+ return false, err
+ }
+
+ if cp.allowsKeystoreRead() {
+ return true, nil
+ }
+
+ return false, nil
+}
+
+func (cp *ConsulPolicy) allowsKeystoreRead() bool {
+ for _, keyPrefix := range cp.KeyPrefixes {
+ name := strings.ToLower(keyPrefix.Name)
+ policy := strings.ToLower(keyPrefix.Policy)
+ if name == "" {
+ if policy == ConsulPolicyWrite || policy == ConsulPolicyRead {
+ return true
+ }
+ }
+ }
+ return false
+}
diff --git a/nomad/consul_policy_test.go b/nomad/consul_policy_test.go
index f7cde067f..a38d57e60 100644
--- a/nomad/consul_policy_test.go
+++ b/nomad/consul_policy_test.go
@@ -183,7 +183,7 @@ func TestConsulACLsAPI_hasSufficientPolicy(t *testing.T) {
aclClient: consul.NewMockACLsAPI(logger),
logger: logger,
}
- result, err := cAPI.hasSufficientPolicy(task, token)
+ result, err := cAPI.canWriteService(task, token)
require.NoError(t, err)
require.Equal(t, exp, result)
}
@@ -200,3 +200,54 @@ func TestConsulACLsAPI_hasSufficientPolicy(t *testing.T) {
try(t, "service1", consul.ExampleOperatorToken4, true)
})
}
+
+func TestConsulPolicy_allowKeystoreRead(t *testing.T) {
+ t.Run("empty", func(t *testing.T) {
+ require.False(t, new(ConsulPolicy).allowsKeystoreRead())
+ })
+
+ t.Run("services only", func(t *testing.T) {
+ require.False(t, (&ConsulPolicy{
+ Services: []*ConsulServiceRule{{
+ Name: "service1",
+ Policy: "write",
+ }},
+ }).allowsKeystoreRead())
+ })
+
+ t.Run("kv any read", func(t *testing.T) {
+ require.True(t, (&ConsulPolicy{
+ KeyPrefixes: []*ConsulKeyRule{{
+ Name: "",
+ Policy: "read",
+ }},
+ }).allowsKeystoreRead())
+ })
+
+ t.Run("kv any write", func(t *testing.T) {
+ require.True(t, (&ConsulPolicy{
+ KeyPrefixes: []*ConsulKeyRule{{
+ Name: "",
+ Policy: "write",
+ }},
+ }).allowsKeystoreRead())
+ })
+
+ t.Run("kv limited read", func(t *testing.T) {
+ require.False(t, (&ConsulPolicy{
+ KeyPrefixes: []*ConsulKeyRule{{
+ Name: "foo/bar",
+ Policy: "read",
+ }},
+ }).allowsKeystoreRead())
+ })
+
+ t.Run("kv limited write", func(t *testing.T) {
+ require.False(t, (&ConsulPolicy{
+ KeyPrefixes: []*ConsulKeyRule{{
+ Name: "foo/bar",
+ Policy: "write",
+ }},
+ }).allowsKeystoreRead())
+ })
+}
diff --git a/nomad/consul_test.go b/nomad/consul_test.go
index c57a0e32b..1dde65b92 100644
--- a/nomad/consul_test.go
+++ b/nomad/consul_test.go
@@ -81,11 +81,11 @@ type mockConsulACLsAPI struct {
stopped bool
}
-func (m *mockConsulACLsAPI) CheckSIPolicy(_ context.Context, _, _ string) error {
+func (m *mockConsulACLsAPI) CheckPermissions(context.Context, string, *structs.ConsulUsage, string) error {
panic("not implemented yet")
}
-func (m *mockConsulACLsAPI) CreateToken(_ context.Context, _ ServiceIdentityRequest) (*structs.SIToken, error) {
+func (m *mockConsulACLsAPI) CreateToken(context.Context, ServiceIdentityRequest) (*structs.SIToken, error) {
panic("not implemented yet")
}
@@ -148,10 +148,11 @@ func TestConsulACLsAPI_CreateToken(t *testing.T) {
ctx := context.Background()
sii := ServiceIdentityRequest{
- AllocID: uuid.Generate(),
- ClusterID: uuid.Generate(),
- TaskName: "my-task1-sidecar-proxy",
- TaskKind: structs.NewTaskKind(structs.ConnectProxyPrefix, "my-service"),
+ ConsulNamespace: "foo-namespace",
+ AllocID: uuid.Generate(),
+ ClusterID: uuid.Generate(),
+ TaskName: "my-task1-sidecar-proxy",
+ TaskKind: structs.NewTaskKind(structs.ConnectProxyPrefix, "my-service"),
}
token, err := c.CreateToken(ctx, sii)
@@ -161,6 +162,7 @@ func TestConsulACLsAPI_CreateToken(t *testing.T) {
require.Nil(t, token)
} else {
require.NoError(t, err)
+ require.Equal(t, "foo-namespace", token.ConsulNamespace)
require.Equal(t, "my-task1-sidecar-proxy", token.TaskName)
require.True(t, helper.IsUUID(token.AccessorID))
require.True(t, helper.IsUUID(token.SecretID))
@@ -187,10 +189,11 @@ func TestConsulACLsAPI_RevokeTokens(t *testing.T) {
ctx := context.Background()
generated, err := c.CreateToken(ctx, ServiceIdentityRequest{
- ClusterID: uuid.Generate(),
- AllocID: uuid.Generate(),
- TaskName: "task1-sidecar-proxy",
- TaskKind: structs.NewTaskKind(structs.ConnectProxyPrefix, "service1"),
+ ConsulNamespace: "foo-namespace",
+ ClusterID: uuid.Generate(),
+ AllocID: uuid.Generate(),
+ TaskName: "task1-sidecar-proxy",
+ TaskKind: structs.NewTaskKind(structs.ConnectProxyPrefix, "service1"),
})
require.NoError(t, err)
@@ -202,7 +205,10 @@ func TestConsulACLsAPI_RevokeTokens(t *testing.T) {
accessors := func(ids ...string) (result []*structs.SITokenAccessor) {
for _, id := range ids {
- result = append(result, &structs.SITokenAccessor{AccessorID: id})
+ result = append(result, &structs.SITokenAccessor{
+ AccessorID: id,
+ ConsulNamespace: "foo-namespace",
+ })
}
return
}
@@ -236,17 +242,21 @@ func TestConsulACLsAPI_MarkForRevocation(t *testing.T) {
c := NewConsulACLsAPI(aclAPI, logger, nil)
generated, err := c.CreateToken(context.Background(), ServiceIdentityRequest{
- ClusterID: uuid.Generate(),
- AllocID: uuid.Generate(),
- TaskName: "task1-sidecar-proxy",
- TaskKind: structs.NewTaskKind(structs.ConnectProxyPrefix, "service1"),
+ ConsulNamespace: "foo-namespace",
+ ClusterID: uuid.Generate(),
+ AllocID: uuid.Generate(),
+ TaskName: "task1-sidecar-proxy",
+ TaskKind: structs.NewTaskKind(structs.ConnectProxyPrefix, "service1"),
})
require.NoError(t, err)
// set the mock error after calling CreateToken for setting up
aclAPI.SetError(nil)
- accessors := []*structs.SITokenAccessor{{AccessorID: generated.AccessorID}}
+ accessors := []*structs.SITokenAccessor{{
+ ConsulNamespace: "foo-namespace",
+ AccessorID: generated.AccessorID,
+ }}
c.MarkForRevocation(accessors)
require.Len(t, c.bgRetryRevocation, 1)
require.Contains(t, c.bgRetryRevocation, accessors[0])
@@ -282,10 +292,11 @@ func TestConsulACLsAPI_bgRetryRevoke(t *testing.T) {
c, server := setup(t)
accessorID := uuid.Generate()
c.bgRetryRevocation = append(c.bgRetryRevocation, &structs.SITokenAccessor{
- NodeID: uuid.Generate(),
- AllocID: uuid.Generate(),
- AccessorID: accessorID,
- TaskName: "task1",
+ ConsulNamespace: "foo-namespace",
+ NodeID: uuid.Generate(),
+ AllocID: uuid.Generate(),
+ AccessorID: accessorID,
+ TaskName: "task1",
})
require.Empty(t, server.purgedAccessorIDs)
c.bgRetryRevoke()
@@ -299,10 +310,11 @@ func TestConsulACLsAPI_bgRetryRevoke(t *testing.T) {
server.failure = errors.New("revocation fail")
accessorID := uuid.Generate()
c.bgRetryRevocation = append(c.bgRetryRevocation, &structs.SITokenAccessor{
- NodeID: uuid.Generate(),
- AllocID: uuid.Generate(),
- AccessorID: accessorID,
- TaskName: "task1",
+ ConsulNamespace: "foo-namespace",
+ NodeID: uuid.Generate(),
+ AllocID: uuid.Generate(),
+ AccessorID: accessorID,
+ TaskName: "task1",
})
require.Empty(t, server.purgedAccessorIDs)
c.bgRetryRevoke()
@@ -329,43 +341,125 @@ func TestConsulACLsAPI_Stop(t *testing.T) {
require.Error(t, err)
}
-func TestConsulACLsAPI_CheckSIPolicy(t *testing.T) {
+// CheckPermissions(ctx context.Context, namespace string, usage *structs.ConsulUsage, secretID string) error
+
+func TestConsulACLsAPI_CheckPermissions(t *testing.T) {
t.Parallel()
- try := func(t *testing.T, service, token string, expErr string) {
+ equalError := func(t *testing.T, exp, err error) {
+ if exp == nil {
+ require.NoError(t, err)
+ } else {
+ require.Equal(t, exp.Error(), err.Error())
+ }
+ }
+
+ try := func(t *testing.T, namespace string, usage *structs.ConsulUsage, secretID string, exp error) {
logger := testlog.HCLogger(t)
aclAPI := consul.NewMockACLsAPI(logger)
cAPI := NewConsulACLsAPI(aclAPI, logger, nil)
- err := cAPI.CheckSIPolicy(context.Background(), service, token)
- if expErr != "" {
- require.EqualError(t, err, expErr)
- } else {
- require.NoError(t, err)
- }
+ err := cAPI.CheckPermissions(context.Background(), namespace, usage, secretID)
+ equalError(t, exp, err)
}
- t.Run("operator has service write", func(t *testing.T) {
- try(t, "service1", consul.ExampleOperatorTokenID1, "")
+ t.Run("check-permissions kv read", func(t *testing.T) {
+ t.Run("uses kv has permission", func(t *testing.T) {
+ u := &structs.ConsulUsage{KV: true}
+ try(t, "default", u, consul.ExampleOperatorTokenID5, nil)
+ })
+
+ t.Run("uses kv without permission", func(t *testing.T) {
+ u := &structs.ConsulUsage{KV: true}
+ try(t, "default", u, consul.ExampleOperatorTokenID1, errors.New("insufficient Consul ACL permissions to use template"))
+ })
+
+ t.Run("uses kv no token", func(t *testing.T) {
+ u := &structs.ConsulUsage{KV: true}
+ try(t, "default", u, "", errors.New("missing consul token"))
+ })
+
+ t.Run("uses kv nonsense token", func(t *testing.T) {
+ u := &structs.ConsulUsage{KV: true}
+ try(t, "default", u, "47d33e22-720a-7fe6-7d7f-418bf844a0be", errors.New("unable to read consul token: no such token"))
+ })
+
+ t.Run("no kv no token", func(t *testing.T) {
+ u := &structs.ConsulUsage{KV: false}
+ try(t, "default", u, "", nil)
+ })
+
+ t.Run("uses kv wrong namespace", func(t *testing.T) {
+ u := &structs.ConsulUsage{KV: true}
+ try(t, "other", u, consul.ExampleOperatorTokenID5, errors.New(`consul ACL token cannot use namespace "other"`))
+ })
})
- t.Run("operator has service_prefix write", func(t *testing.T) {
- try(t, "foo-service1", consul.ExampleOperatorTokenID2, "")
+ t.Run("check-permissions service write", func(t *testing.T) {
+ usage := &structs.ConsulUsage{Services: []string{"service1"}}
+
+ t.Run("operator has service write", func(t *testing.T) {
+ try(t, "default", usage, consul.ExampleOperatorTokenID1, nil)
+ })
+
+		t.Run("operator has service write wrong ns", func(t *testing.T) {
+ try(t, "other", usage, consul.ExampleOperatorTokenID1, errors.New(`consul ACL token cannot use namespace "other"`))
+ })
+
+ t.Run("operator has service_prefix write", func(t *testing.T) {
+ u := &structs.ConsulUsage{Services: []string{"foo-service1"}}
+ try(t, "default", u, consul.ExampleOperatorTokenID2, nil)
+ })
+
+ t.Run("operator has service_prefix write wrong prefix", func(t *testing.T) {
+ u := &structs.ConsulUsage{Services: []string{"bar-service1"}}
+ try(t, "default", u, consul.ExampleOperatorTokenID2, errors.New(`insufficient Consul ACL permissions to write service "bar-service1"`))
+ })
+
+ t.Run("operator permissions insufficient", func(t *testing.T) {
+ try(t, "default", usage, consul.ExampleOperatorTokenID3, errors.New(`insufficient Consul ACL permissions to write service "service1"`))
+ })
+
+ t.Run("operator provided no token", func(t *testing.T) {
+ try(t, "default", usage, "", errors.New("missing consul token"))
+ })
+
+ t.Run("operator provided nonsense token", func(t *testing.T) {
+ try(t, "default", usage, "f1682bde-1e71-90b1-9204-85d35467ba61", errors.New("unable to read consul token: no such token"))
+ })
})
- t.Run("operator permissions insufficient", func(t *testing.T) {
- try(t, "service1", consul.ExampleOperatorTokenID3,
- "permission denied for \"service1\"",
- )
- })
+ t.Run("check-permissions connect service identity write", func(t *testing.T) {
+ usage := &structs.ConsulUsage{Kinds: []structs.TaskKind{structs.NewTaskKind(structs.ConnectProxyPrefix, "service1")}}
- t.Run("no token provided", func(t *testing.T) {
- try(t, "service1", "", "missing consul token")
- })
+ t.Run("operator has service write", func(t *testing.T) {
+ try(t, "default", usage, consul.ExampleOperatorTokenID1, nil)
+ })
- t.Run("nonsense token provided", func(t *testing.T) {
- try(t, "service1", "f1682bde-1e71-90b1-9204-85d35467ba61",
- "unable to validate operator consul token: no such token",
- )
+ t.Run("operator has service write wrong ns", func(t *testing.T) {
+ try(t, "other", usage, consul.ExampleOperatorTokenID1, errors.New(`consul ACL token cannot use namespace "other"`))
+ })
+
+ t.Run("operator has service_prefix write", func(t *testing.T) {
+ u := &structs.ConsulUsage{Kinds: []structs.TaskKind{structs.NewTaskKind(structs.ConnectProxyPrefix, "foo-service1")}}
+ try(t, "default", u, consul.ExampleOperatorTokenID2, nil)
+ })
+
+ t.Run("operator has service_prefix write wrong prefix", func(t *testing.T) {
+ u := &structs.ConsulUsage{Kinds: []structs.TaskKind{structs.NewTaskKind(structs.ConnectProxyPrefix, "bar-service1")}}
+ try(t, "default", u, consul.ExampleOperatorTokenID2, errors.New(`insufficient Consul ACL permissions to write Connect service "bar-service1"`))
+ })
+
+ t.Run("operator permissions insufficient", func(t *testing.T) {
+ try(t, "default", usage, consul.ExampleOperatorTokenID3, errors.New(`insufficient Consul ACL permissions to write Connect service "service1"`))
+ })
+
+ t.Run("operator provided no token", func(t *testing.T) {
+ try(t, "default", usage, "", errors.New("missing consul token"))
+ })
+
+ t.Run("operator provided nonsense token", func(t *testing.T) {
+ try(t, "default", usage, "f1682bde-1e71-90b1-9204-85d35467ba61", errors.New("unable to read consul token: no such token"))
+ })
})
}
diff --git a/nomad/deployment_watcher_shims.go b/nomad/deployment_watcher_shims.go
index 7f2e34685..a7fb7da71 100644
--- a/nomad/deployment_watcher_shims.go
+++ b/nomad/deployment_watcher_shims.go
@@ -30,6 +30,7 @@ func (d *deploymentWatcherRaftShim) UpsertJob(job *structs.Job) (uint64, error)
update := &structs.JobRegisterRequest{
Job: job,
}
+
fsmErrIntf, index, raftErr := d.apply(structs.JobRegisterRequestType, update)
return d.convertApplyErrors(fsmErrIntf, index, raftErr)
}
diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go
index 9f4371481..7212db8ab 100644
--- a/nomad/job_endpoint.go
+++ b/nomad/job_endpoint.go
@@ -253,30 +253,31 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis
}
}
- // helper function that checks if the "operator token" supplied with the
- // job has sufficient ACL permissions for establishing consul connect services
- checkOperatorToken := func(kind structs.TaskKind) error {
+ // helper function that checks if the Consul token supplied with the job has
+ // sufficient ACL permissions for:
+ // - registering services into namespace of each group
+ // - reading kv store of each group
+ // - establishing consul connect services
+ checkConsulToken := func(usages map[string]*structs.ConsulUsage) error {
if j.srv.config.ConsulConfig.AllowsUnauthenticated() {
// if consul.allow_unauthenticated is enabled (which is the default)
- // just let the Job through without checking anything.
+ // just let the job through without checking anything
return nil
}
- service := kind.Value()
ctx := context.Background()
- if err := j.srv.consulACLs.CheckSIPolicy(ctx, service, args.Job.ConsulToken); err != nil {
- // not much in the way of exported error types, we could parse
- // the content, but all errors are going to be failures anyway
- return errors.Wrap(err, "operator token denied")
+ for namespace, usage := range usages {
+ if err := j.srv.consulACLs.CheckPermissions(ctx, namespace, usage, args.Job.ConsulToken); err != nil {
+ return errors.Wrap(err, "job-submitter consul token denied")
+ }
}
+
return nil
}
- // Enforce that the operator has necessary Consul ACL permissions
- for _, taskKind := range args.Job.ConnectTasks() {
- if err := checkOperatorToken(taskKind); err != nil {
- return err
- }
+ // Enforce the job-submitter has a Consul token with necessary ACL permissions.
+ if err := checkConsulToken(args.Job.ConsulUsages()); err != nil {
+ return err
}
// Create or Update Consul Configuration Entries defined in the job. For now
diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go
index 2278ca71c..944c7f772 100644
--- a/nomad/job_endpoint_test.go
+++ b/nomad/job_endpoint_test.go
@@ -730,6 +730,9 @@ func TestJobEndpoint_Register_Connect_AllowUnauthenticatedFalse(t *testing.T) {
},
}
+ // For this test we only care about authorizing the connect service
+ job.TaskGroups[0].Tasks[0].Services = nil
+
newRequest := func(job *structs.Job) *structs.JobRegisterRequest {
return &structs.JobRegisterRequest{
Job: job,
@@ -760,7 +763,7 @@ func TestJobEndpoint_Register_Connect_AllowUnauthenticatedFalse(t *testing.T) {
request.Job.ConsulToken = noOpToken
var response structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", request, &response)
- require.EqualError(t, err, "operator token denied: missing consul token")
+ require.EqualError(t, err, "job-submitter consul token denied: missing consul token")
})
t.Run("unknown token provided", func(t *testing.T) {
@@ -768,7 +771,7 @@ func TestJobEndpoint_Register_Connect_AllowUnauthenticatedFalse(t *testing.T) {
request.Job.ConsulToken = unrecognizedOpToken
var response structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", request, &response)
- require.EqualError(t, err, "operator token denied: unable to validate operator consul token: no such token")
+ require.EqualError(t, err, "job-submitter consul token denied: unable to read consul token: no such token")
})
t.Run("unauthorized token provided", func(t *testing.T) {
@@ -776,7 +779,7 @@ func TestJobEndpoint_Register_Connect_AllowUnauthenticatedFalse(t *testing.T) {
request.Job.ConsulToken = unauthorizedOpToken
var response structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", request, &response)
- require.EqualError(t, err, "operator token denied: permission denied for \"service1\"")
+ require.EqualError(t, err, `job-submitter consul token denied: insufficient Consul ACL permissions to write service "service1"`)
})
t.Run("authorized token provided", func(t *testing.T) {
diff --git a/nomad/node_endpoint.go b/nomad/node_endpoint.go
index 184ef1f56..1431a1ceb 100644
--- a/nomad/node_endpoint.go
+++ b/nomad/node_endpoint.go
@@ -1759,10 +1759,11 @@ func (n *Node) DeriveSIToken(args *structs.DeriveSITokenRequest, reply *structs.
return nil
}
secret, err := n.srv.consulACLs.CreateToken(ctx, ServiceIdentityRequest{
- TaskKind: task.TaskKind,
- TaskName: task.TaskName,
- ClusterID: clusterID,
- AllocID: alloc.ID,
+ ConsulNamespace: tg.Consul.GetNamespace(),
+ TaskKind: task.TaskKind,
+ TaskName: task.TaskName,
+ ClusterID: clusterID,
+ AllocID: alloc.ID,
})
if err != nil {
return err
@@ -1795,10 +1796,11 @@ func (n *Node) DeriveSIToken(args *structs.DeriveSITokenRequest, reply *structs.
for task, secret := range results {
tokens[task] = secret.SecretID
accessor := &structs.SITokenAccessor{
- NodeID: alloc.NodeID,
- AllocID: alloc.ID,
- TaskName: task,
- AccessorID: secret.AccessorID,
+ ConsulNamespace: tg.Consul.GetNamespace(),
+ NodeID: alloc.NodeID,
+ AllocID: alloc.ID,
+ TaskName: task,
+ AccessorID: secret.AccessorID,
}
accessors = append(accessors, accessor)
}
diff --git a/nomad/structs/consul.go b/nomad/structs/consul.go
new file mode 100644
index 000000000..410739ae2
--- /dev/null
+++ b/nomad/structs/consul.go
@@ -0,0 +1,88 @@
+package structs
+
+// Consul represents optional per-group consul configuration.
+type Consul struct {
+ // Namespace in which to operate in Consul.
+ Namespace string
+}
+
+// Copy the Consul block.
+func (c *Consul) Copy() *Consul {
+ if c == nil {
+ return nil
+ }
+ return &Consul{
+ Namespace: c.Namespace,
+ }
+}
+
+// Equals returns whether c and o are the same.
+func (c *Consul) Equals(o *Consul) bool {
+ if c == nil || o == nil {
+ return c == o
+ }
+ return c.Namespace == o.Namespace
+}
+
+// Validate returns whether c is valid.
+func (c *Consul) Validate() error {
+ // nothing to do here
+ return nil
+}
+
+// ConsulUsage provides meta information about how Consul is used by a job,
+// noting which connect services and normal services will be registered, and
+// whether the keystore will be read via template.
+type ConsulUsage struct {
+ Kinds []TaskKind
+ Services []string
+ KV bool
+}
+
+// Used returns true if Consul is used for registering services or reading from
+// the keystore.
+func (cu *ConsulUsage) Used() bool {
+ switch {
+ case cu.KV:
+ return true
+ case len(cu.Kinds) > 0:
+ return true
+ case len(cu.Services) > 0:
+ return true
+ }
+ return false
+}
+
+// ConsulUsages returns a map from Consul namespace to things that will use Consul,
+// including ConsulConnect TaskKinds, Consul Services from groups and tasks, and
+// a boolean indicating if Consul KV is in use.
+func (j *Job) ConsulUsages() map[string]*ConsulUsage {
+ m := make(map[string]*ConsulUsage)
+
+ for _, tg := range j.TaskGroups {
+ namespace := j.ConsulNamespace
+ if tgNamespace := tg.Consul.GetNamespace(); tgNamespace != "" {
+ namespace = tgNamespace
+ }
+ if _, exists := m[namespace]; !exists {
+ m[namespace] = new(ConsulUsage)
+ }
+
+ // Gather group services
+ for _, service := range tg.Services {
+ m[namespace].Services = append(m[namespace].Services, service.Name)
+ }
+
+ // Gather task services and KV usage
+ for _, task := range tg.Tasks {
+ for _, service := range task.Services {
+ m[namespace].Services = append(m[namespace].Services, service.Name)
+ }
+ if len(task.Templates) > 0 {
+ m[namespace].KV = true
+ }
+ }
+ }
+
+ return m
+}
diff --git a/nomad/structs/consul_oss.go b/nomad/structs/consul_oss.go
new file mode 100644
index 000000000..a2a2cc4e8
--- /dev/null
+++ b/nomad/structs/consul_oss.go
@@ -0,0 +1,7 @@
+// +build !ent
+
+package structs
+
+func (c *Consul) GetNamespace() string {
+ return ""
+}
diff --git a/nomad/structs/consul_test.go b/nomad/structs/consul_test.go
new file mode 100644
index 000000000..43801c933
--- /dev/null
+++ b/nomad/structs/consul_test.go
@@ -0,0 +1,55 @@
+package structs
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestConsul_Copy(t *testing.T) {
+ t.Run("nil", func(t *testing.T) {
+ result := (*Consul)(nil).Copy()
+ require.Nil(t, result)
+ })
+
+ t.Run("set", func(t *testing.T) {
+ result := (&Consul{
+ Namespace: "one",
+ }).Copy()
+ require.Equal(t, &Consul{Namespace: "one"}, result)
+ })
+}
+
+func TestConsul_Equals(t *testing.T) {
+ t.Run("nil and nil", func(t *testing.T) {
+ result := (*Consul)(nil).Equals((*Consul)(nil))
+ require.True(t, result)
+ })
+
+ t.Run("nil and set", func(t *testing.T) {
+ result := (*Consul)(nil).Equals(&Consul{Namespace: "one"})
+ require.False(t, result)
+ })
+
+ t.Run("same", func(t *testing.T) {
+ result := (&Consul{Namespace: "one"}).Equals(&Consul{Namespace: "one"})
+ require.True(t, result)
+ })
+
+ t.Run("different", func(t *testing.T) {
+ result := (&Consul{Namespace: "one"}).Equals(&Consul{Namespace: "two"})
+ require.False(t, result)
+ })
+}
+
+func TestConsul_Validate(t *testing.T) {
+ t.Run("empty ns", func(t *testing.T) {
+ result := (&Consul{Namespace: ""}).Validate()
+ require.Nil(t, result)
+ })
+
+ t.Run("with ns", func(t *testing.T) {
+ result := (&Consul{Namespace: "one"}).Validate()
+ require.Nil(t, result)
+ })
+}
diff --git a/nomad/structs/diff.go b/nomad/structs/diff.go
index 4d4c7a5fc..6964c009a 100644
--- a/nomad/structs/diff.go
+++ b/nomad/structs/diff.go
@@ -300,6 +300,11 @@ func (tg *TaskGroup) Diff(other *TaskGroup, contextual bool) (*TaskGroupDiff, er
diff.Objects = append(diff.Objects, diskDiff)
}
+ consulDiff := primitiveObjectDiff(tg.Consul, other.Consul, nil, "Consul", contextual)
+ if consulDiff != nil {
+ diff.Objects = append(diff.Objects, consulDiff)
+ }
+
// Update diff
// COMPAT: Remove "Stagger" in 0.7.0.
if uDiff := primitiveObjectDiff(tg.Update, other.Update, []string{"Stagger"}, "Update", contextual); uDiff != nil {
diff --git a/nomad/structs/diff_test.go b/nomad/structs/diff_test.go
index ec1fe9c12..761337674 100644
--- a/nomad/structs/diff_test.go
+++ b/nomad/structs/diff_test.go
@@ -1661,6 +1661,88 @@ func TestTaskGroupDiff(t *testing.T) {
},
},
},
+ {
+ TestCase: "Consul added",
+ Old: &TaskGroup{},
+ New: &TaskGroup{
+ Consul: &Consul{
+ Namespace: "team1",
+ },
+ },
+ Expected: &TaskGroupDiff{
+ Type: DiffTypeEdited,
+ Objects: []*ObjectDiff{
+ {
+ Type: DiffTypeAdded,
+ Name: "Consul",
+ Fields: []*FieldDiff{
+ {
+ Type: DiffTypeAdded,
+ Name: "Namespace",
+ Old: "",
+ New: "team1",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ TestCase: "Consul deleted",
+ Old: &TaskGroup{
+ Consul: &Consul{
+ Namespace: "team1",
+ },
+ },
+ New: &TaskGroup{},
+ Expected: &TaskGroupDiff{
+ Type: DiffTypeEdited,
+ Objects: []*ObjectDiff{
+ {
+ Type: DiffTypeDeleted,
+ Name: "Consul",
+ Fields: []*FieldDiff{
+ {
+ Type: DiffTypeDeleted,
+ Name: "Namespace",
+ Old: "team1",
+ New: "",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ TestCase: "Consul updated",
+ Old: &TaskGroup{
+ Consul: &Consul{
+ Namespace: "team1",
+ },
+ },
+ New: &TaskGroup{
+ Consul: &Consul{
+ Namespace: "team2",
+ },
+ },
+ Expected: &TaskGroupDiff{
+ Type: DiffTypeEdited,
+ Objects: []*ObjectDiff{
+ {
+ Type: DiffTypeEdited,
+ Name: "Consul",
+ Fields: []*FieldDiff{
+ {
+ Type: DiffTypeEdited,
+ Name: "Namespace",
+ Old: "team1",
+ New: "team2",
+ },
+ },
+ },
+ },
+ },
+ },
{
TestCase: "RestartPolicy added",
Old: &TaskGroup{},
@@ -2591,6 +2673,7 @@ func TestTaskGroupDiff(t *testing.T) {
Services: []*Service{
{
Name: "foo",
+ Namespace: "team1",
TaskName: "task1",
EnableTagOverride: false,
Checks: []*ServiceCheck{
@@ -2667,6 +2750,7 @@ func TestTaskGroupDiff(t *testing.T) {
Services: []*Service{
{
Name: "foo",
+ Namespace: "team1",
TaskName: "task2",
EnableTagOverride: true,
Checks: []*ServiceCheck{
@@ -2775,6 +2859,12 @@ func TestTaskGroupDiff(t *testing.T) {
Old: "foo",
New: "foo",
},
+ {
+ Type: DiffTypeNone,
+ Name: "Namespace",
+ Old: "team1",
+ New: "team1",
+ },
{
Type: DiffTypeNone,
Name: "OnUpdate",
@@ -3634,7 +3724,7 @@ func TestTaskGroupDiff(t *testing.T) {
Old: &TaskGroup{},
New: &TaskGroup{
Volumes: map[string]*VolumeRequest{
- "foo": &VolumeRequest{
+ "foo": {
Name: "foo",
Type: "host",
Source: "foo-src",
@@ -3691,7 +3781,7 @@ func TestTaskGroupDiff(t *testing.T) {
TestCase: "TaskGroup volumes edited",
Old: &TaskGroup{
Volumes: map[string]*VolumeRequest{
- "foo": &VolumeRequest{
+ "foo": {
Name: "foo",
Type: "csi",
Source: "foo-src1",
@@ -3701,7 +3791,7 @@ func TestTaskGroupDiff(t *testing.T) {
MountFlags: []string{"relatime", "rw"},
},
},
- "bar": &VolumeRequest{
+ "bar": {
Name: "bar",
Type: "host",
Source: "bar-src",
@@ -3711,7 +3801,7 @@ func TestTaskGroupDiff(t *testing.T) {
},
New: &TaskGroup{
Volumes: map[string]*VolumeRequest{
- "foo": &VolumeRequest{
+ "foo": {
Name: "foo",
Type: "csi",
Source: "foo-src2",
@@ -3721,7 +3811,7 @@ func TestTaskGroupDiff(t *testing.T) {
MountFlags: []string{"relatime", "rw", "nosuid"},
},
},
- "bar": &VolumeRequest{ // untouched
+ "bar": { // untouched
Name: "bar",
Type: "host",
Source: "bar-src",
@@ -5455,6 +5545,12 @@ func TestTaskDiff(t *testing.T) {
Old: "foo",
New: "foo",
},
+ {
+ Type: DiffTypeNone,
+ Name: "Namespace",
+ Old: "",
+ New: "",
+ },
{
Type: DiffTypeNone,
Name: "OnUpdate",
@@ -5599,6 +5695,10 @@ func TestTaskDiff(t *testing.T) {
Type: DiffTypeNone,
Name: "Name",
},
+ {
+ Type: DiffTypeNone,
+ Name: "Namespace",
+ },
{
Type: DiffTypeNone,
Name: "OnUpdate",
@@ -6102,6 +6202,12 @@ func TestTaskDiff(t *testing.T) {
Old: "foo",
New: "foo",
},
+ {
+ Type: DiffTypeNone,
+ Name: "Namespace",
+ Old: "",
+ New: "",
+ },
{
Type: DiffTypeNone,
Name: "OnUpdate",
diff --git a/nomad/structs/service_identities.go b/nomad/structs/service_identities.go
index 64423da38..29e249c8b 100644
--- a/nomad/structs/service_identities.go
+++ b/nomad/structs/service_identities.go
@@ -4,18 +4,20 @@ import "errors"
// An SIToken is the important bits of a Service Identity token generated by Consul.
type SIToken struct {
- TaskName string // the nomad task backing the consul service (native or sidecar)
- AccessorID string
- SecretID string
+ ConsulNamespace string
+ TaskName string // the nomad task backing the consul service (native or sidecar)
+ AccessorID string
+ SecretID string
}
-// An SITokenAccessor is a reference to a created Service Identity token on
+// An SITokenAccessor is a reference to a created Consul Service Identity token on
// behalf of an allocation's task.
type SITokenAccessor struct {
- NodeID string
- AllocID string
- AccessorID string
- TaskName string
+ ConsulNamespace string
+ NodeID string
+ AllocID string
+ AccessorID string
+ TaskName string
// Raft index
CreateIndex uint64
diff --git a/nomad/structs/services.go b/nomad/structs/services.go
index 03a777f88..b30839ad6 100644
--- a/nomad/structs/services.go
+++ b/nomad/structs/services.go
@@ -452,6 +452,12 @@ type Service struct {
Meta map[string]string // Consul service meta
CanaryMeta map[string]string // Consul service meta when it is a canary
+ // The consul namespace in which this service will be registered. Namespace
+ // at the service.check level is not part of the Nomad API - it must be
+ // set at the job or group level. This field is managed internally so
+ // that Hash can work correctly.
+ Namespace string
+
// OnUpdate Specifies how the service and its checks should be evaluated
// during an update
OnUpdate string
@@ -514,6 +520,12 @@ func (s *Service) Canonicalize(job string, taskGroup string, task string) {
for _, check := range s.Checks {
check.Canonicalize(s.Name)
}
+
+ // Consul API returns "default" whether the namespace is empty or set as
+ // such, so we coerce our copy of the service to be the same.
+ if s.Namespace == "" {
+ s.Namespace = "default"
+ }
}
// Validate checks if the Service definition is valid
@@ -610,6 +622,7 @@ func (s *Service) Hash(allocID, taskName string, canary bool) string {
hashMeta(h, s.CanaryMeta)
hashConnect(h, s.Connect)
hashString(h, s.OnUpdate)
+ hashString(h, s.Namespace)
// Base32 is used for encoding the hash as sha1 hashes can always be
// encoded without padding, only 4 bytes larger than base64, and saves
@@ -666,6 +679,10 @@ func (s *Service) Equals(o *Service) bool {
return s == o
}
+ if s.Namespace != o.Namespace {
+ return false
+ }
+
if s.AddressMode != o.AddressMode {
return false
}
diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go
index a367c869e..e92d42981 100644
--- a/nomad/structs/structs.go
+++ b/nomad/structs/structs.go
@@ -4029,12 +4029,15 @@ type Job struct {
// token and is not stored after Job submission.
ConsulToken string
+ // ConsulNamespace is the Consul namespace
+ ConsulNamespace string
+
// VaultToken is the Vault token that proves the submitter of the job has
// access to the specified Vault policies. This field is only used to
// transfer the token and is not stored after Job submission.
VaultToken string
- // VaultNamespace is the Vault namepace
+ // VaultNamespace is the Vault namespace
VaultNamespace string
// NomadTokenID is the Accessor ID of the ACL token (if any)
@@ -5941,6 +5944,9 @@ type TaskGroup struct {
// overridden in the task.
Networks Networks
+ // Consul configuration specific to this task group
+ Consul *Consul
+
// Services this group provides
Services []*Service
@@ -5970,6 +5976,7 @@ func (tg *TaskGroup) Copy() *TaskGroup {
ntg.Spreads = CopySliceSpreads(ntg.Spreads)
ntg.Volumes = CopyMapVolumeRequest(ntg.Volumes)
ntg.Scaling = ntg.Scaling.Copy()
+ ntg.Consul = ntg.Consul.Copy()
// Copy the network objects
if tg.Networks != nil {
@@ -9177,6 +9184,12 @@ type Allocation struct {
ModifyTime int64
}
+// ConsulNamespace returns the Consul namespace of the task group associated
+// with this allocation.
+func (a *Allocation) ConsulNamespace() string {
+ return a.Job.LookupTaskGroup(a.TaskGroup).Consul.GetNamespace()
+}
+
// Index returns the index of the allocation. If the allocation is from a task
// group with count greater than 1, there will be multiple allocations for it.
func (a *Allocation) Index() uint {
diff --git a/scheduler/util.go b/scheduler/util.go
index caef16924..3bf944373 100644
--- a/scheduler/util.go
+++ b/scheduler/util.go
@@ -371,6 +371,11 @@ func tasksUpdated(jobA, jobB *structs.Job, taskGroup string) bool {
return true
}
+ // Check consul namespace updated
+ if consulNamespaceUpdated(a, b) {
+ return true
+ }
+
// Check connect service(s) updated
if connectServiceUpdated(a.Services, b.Services) {
return true
@@ -432,6 +437,18 @@ func tasksUpdated(jobA, jobB *structs.Job, taskGroup string) bool {
return false
}
+// consulNamespaceUpdated returns true if the Consul namespace in the task group
+// has been changed.
+//
+// This is treated as a destructive update unlike ordinary Consul service configuration
+// because Namespaces directly impact networking validity among Consul intentions.
+// Forcing the task through a reschedule is a sure way of breaking no-longer valid
+// network connections.
+func consulNamespaceUpdated(tgA, tgB *structs.TaskGroup) bool {
+ // job.ConsulNamespace is pushed down to the TGs, just check those
+ return tgA.Consul.GetNamespace() != tgB.Consul.GetNamespace()
+}
+
// connectServiceUpdated returns true if any services with a connect stanza have
// been changed in such a way that requires a destructive update.
//
diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go
index 7453feb8a..d8e0e04d9 100644
--- a/vendor/github.com/hashicorp/consul/api/acl.go
+++ b/vendor/github.com/hashicorp/consul/api/acl.go
@@ -270,16 +270,61 @@ type ACLAuthMethodNamespaceRule struct {
type ACLAuthMethodListEntry struct {
Name string
Type string
- DisplayName string `json:",omitempty"`
- Description string `json:",omitempty"`
- CreateIndex uint64
- ModifyIndex uint64
+ DisplayName string `json:",omitempty"`
+ Description string `json:",omitempty"`
+ MaxTokenTTL time.Duration `json:",omitempty"`
+
+ // TokenLocality defines the kind of token that this auth method produces.
+ // This can be either 'local' or 'global'. If empty 'local' is assumed.
+ TokenLocality string `json:",omitempty"`
+ CreateIndex uint64
+ ModifyIndex uint64
// Namespace is the namespace the ACLAuthMethodListEntry is associated with.
// Namespacing is a Consul Enterprise feature.
Namespace string `json:",omitempty"`
}
+// This is nearly identical to the ACLAuthMethod MarshalJSON
+func (m *ACLAuthMethodListEntry) MarshalJSON() ([]byte, error) {
+ type Alias ACLAuthMethodListEntry
+ exported := &struct {
+ MaxTokenTTL string `json:",omitempty"`
+ *Alias
+ }{
+ MaxTokenTTL: m.MaxTokenTTL.String(),
+ Alias: (*Alias)(m),
+ }
+ if m.MaxTokenTTL == 0 {
+ exported.MaxTokenTTL = ""
+ }
+
+ return json.Marshal(exported)
+}
+
+// This is nearly identical to the ACLAuthMethod UnmarshalJSON
+func (m *ACLAuthMethodListEntry) UnmarshalJSON(data []byte) error {
+ type Alias ACLAuthMethodListEntry
+ aux := &struct {
+ MaxTokenTTL string
+ *Alias
+ }{
+ Alias: (*Alias)(m),
+ }
+
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+ var err error
+ if aux.MaxTokenTTL != "" {
+ if m.MaxTokenTTL, err = time.ParseDuration(aux.MaxTokenTTL); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// ParseKubernetesAuthMethodConfig takes a raw config map and returns a parsed
// KubernetesAuthMethodConfig.
func ParseKubernetesAuthMethodConfig(raw map[string]interface{}) (*KubernetesAuthMethodConfig, error) {
diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go
index a4cc143f0..0ed6fc298 100644
--- a/vendor/github.com/hashicorp/consul/api/agent.go
+++ b/vendor/github.com/hashicorp/consul/api/agent.go
@@ -3,6 +3,7 @@ package api
import (
"bufio"
"bytes"
+ "context"
"fmt"
"io"
"net/http"
@@ -121,6 +122,7 @@ type AgentServiceConnectProxyConfig struct {
Upstreams []Upstream `json:",omitempty"`
MeshGateway MeshGatewayConfig `json:",omitempty"`
Expose ExposeConfig `json:",omitempty"`
+ TransparentProxy bool `json:",omitempty"`
}
const (
@@ -266,12 +268,23 @@ type AgentServiceRegistration struct {
Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"`
}
-//ServiceRegisterOpts is used to pass extra options to the service register.
+// ServiceRegisterOpts is used to pass extra options to the service register.
type ServiceRegisterOpts struct {
//Missing healthchecks will be deleted from the agent.
//Using this parameter allows to idempotently register a service and its checks without
//having to manually deregister checks.
ReplaceExistingChecks bool
+
+ // ctx is an optional context pass through to the underlying HTTP
+ // request layer. Use WithContext() to set the context.
+ ctx context.Context
+}
+
+// WithContext sets the context to be used for the request on a new ServiceRegisterOpts,
+// and returns the opts.
+func (o ServiceRegisterOpts) WithContext(ctx context.Context) ServiceRegisterOpts {
+ o.ctx = ctx
+ return o
}
// AgentCheckRegistration is used to register a new check
@@ -301,6 +314,7 @@ type AgentServiceCheck struct {
TCP string `json:",omitempty"`
Status string `json:",omitempty"`
Notes string `json:",omitempty"`
+ TLSServerName string `json:",omitempty"`
TLSSkipVerify bool `json:",omitempty"`
GRPC string `json:",omitempty"`
GRPCUseTLS bool `json:",omitempty"`
@@ -394,6 +408,7 @@ type Upstream struct {
LocalBindPort int `json:",omitempty"`
Config map[string]interface{} `json:",omitempty" bexpr:"-"`
MeshGateway MeshGatewayConfig `json:",omitempty"`
+ CentrallyConfigured bool `json:",omitempty" bexpr:"-"`
}
// Agent can be used to query the Agent endpoints
@@ -494,7 +509,14 @@ func (a *Agent) Checks() (map[string]*AgentCheck, error) {
// ChecksWithFilter returns a subset of the locally registered checks that match
// the given filter expression
func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) {
+ return a.ChecksWithFilterOpts(filter, nil)
+}
+
+// ChecksWithFilterOpts returns a subset of the locally registered checks that match
+// the given filter expression and QueryOptions.
+func (a *Agent) ChecksWithFilterOpts(filter string, q *QueryOptions) (map[string]*AgentCheck, error) {
r := a.c.newRequest("GET", "/v1/agent/checks")
+ r.setQueryOptions(q)
r.filterQuery(filter)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
@@ -517,7 +539,14 @@ func (a *Agent) Services() (map[string]*AgentService, error) {
// ServicesWithFilter returns a subset of the locally registered services that match
// the given filter expression
func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) {
+ return a.ServicesWithFilterOpts(filter, nil)
+}
+
+// ServicesWithFilterOpts returns a subset of the locally registered services that match
+// the given filter expression and QueryOptions.
+func (a *Agent) ServicesWithFilterOpts(filter string, q *QueryOptions) (map[string]*AgentService, error) {
r := a.c.newRequest("GET", "/v1/agent/services")
+ r.setQueryOptions(q)
r.filterQuery(filter)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
@@ -688,6 +717,7 @@ func (a *Agent) ServiceRegisterOpts(service *AgentServiceRegistration, opts Serv
func (a *Agent) serviceRegister(service *AgentServiceRegistration, opts ServiceRegisterOpts) error {
r := a.c.newRequest("PUT", "/v1/agent/service/register")
r.obj = service
+ r.ctx = opts.ctx
if opts.ReplaceExistingChecks {
r.params.Set("replace-existing-checks", "true")
}
@@ -711,6 +741,19 @@ func (a *Agent) ServiceDeregister(serviceID string) error {
return nil
}
+// ServiceDeregisterOpts is used to deregister a service with
+// the local agent with QueryOptions.
+func (a *Agent) ServiceDeregisterOpts(serviceID string, q *QueryOptions) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
+ r.setQueryOptions(q)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
// PassTTL is used to set a TTL check to the passing state.
//
// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
@@ -785,6 +828,10 @@ type checkUpdate struct {
// strings for compatibility (though a newer version of Consul will still be
// required to use this API).
func (a *Agent) UpdateTTL(checkID, output, status string) error {
+ return a.UpdateTTLOpts(checkID, output, status, nil)
+}
+
+func (a *Agent) UpdateTTLOpts(checkID, output, status string, q *QueryOptions) error {
switch status {
case "pass", HealthPassing:
status = HealthPassing
@@ -798,6 +845,7 @@ func (a *Agent) UpdateTTL(checkID, output, status string) error {
endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID)
r := a.c.newRequest("PUT", endpoint)
+ r.setQueryOptions(q)
r.obj = &checkUpdate{
Status: status,
Output: output,
@@ -827,7 +875,14 @@ func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
// CheckDeregister is used to deregister a check with
// the local agent
func (a *Agent) CheckDeregister(checkID string) error {
+ return a.CheckDeregisterOpts(checkID, nil)
+}
+
+// CheckDeregisterOpts is used to deregister a check with
+// the local agent using query options
+func (a *Agent) CheckDeregisterOpts(checkID string, q *QueryOptions) error {
r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
+ r.setQueryOptions(q)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go
index 08f00c406..dbecaa5af 100644
--- a/vendor/github.com/hashicorp/consul/api/api.go
+++ b/vendor/github.com/hashicorp/consul/api/api.go
@@ -14,6 +14,7 @@ import (
"os"
"strconv"
"strings"
+ "sync"
"time"
"github.com/hashicorp/go-cleanhttp"
@@ -548,9 +549,48 @@ func (c *Config) GenerateEnv() []string {
// Client provides a client to the Consul API
type Client struct {
+ modifyLock sync.RWMutex
+ headers http.Header
+
config Config
}
+// Headers gets the current set of headers used for requests. This returns a
+// copy; to modify it call AddHeader or SetHeaders.
+func (c *Client) Headers() http.Header {
+ c.modifyLock.RLock()
+ defer c.modifyLock.RUnlock()
+
+ if c.headers == nil {
+ return nil
+ }
+
+ ret := make(http.Header)
+ for k, v := range c.headers {
+ for _, val := range v {
+ ret[k] = append(ret[k], val)
+ }
+ }
+
+ return ret
+}
+
+// AddHeader allows a single header key/value pair to be added
+// in a race-safe fashion.
+func (c *Client) AddHeader(key, value string) {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+ c.headers.Add(key, value)
+}
+
+// SetHeaders clears all previous headers and uses only the given
+// ones going forward.
+func (c *Client) SetHeaders(headers http.Header) {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+ c.headers = headers
+}
+
// NewClient returns a new client
func NewClient(config *Config) (*Client, error) {
// bootstrap the config
@@ -640,7 +680,7 @@ func NewClient(config *Config) (*Client, error) {
config.Token = defConfig.Token
}
- return &Client{config: *config}, nil
+ return &Client{config: *config, headers: make(http.Header)}, nil
}
// NewHttpClient returns an http client configured with the given Transport and TLS
@@ -853,8 +893,9 @@ func (c *Client) newRequest(method, path string) *request {
Path: path,
},
params: make(map[string][]string),
- header: make(http.Header),
+ header: c.Headers(),
}
+
if c.config.Datacenter != "" {
r.params.Set("dc", c.config.Datacenter)
}
diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go
index f5ef60e29..f303187cd 100644
--- a/vendor/github.com/hashicorp/consul/api/config_entry.go
+++ b/vendor/github.com/hashicorp/consul/api/config_entry.go
@@ -91,17 +91,94 @@ type ExposePath struct {
ParsedFromCheck bool
}
+type ConnectConfiguration struct {
+ // UpstreamConfigs is a map of service to per-upstream configuration
+ UpstreamConfigs map[string]UpstreamConfig `json:",omitempty" alias:"upstream_configs"`
+
+ // UpstreamDefaults contains default configuration for all upstreams of a given service
+ UpstreamDefaults UpstreamConfig `json:",omitempty" alias:"upstream_defaults"`
+}
+
+type UpstreamConfig struct {
+ // EnvoyListenerJSON is a complete override ("escape hatch") for the upstream's
+ // listener.
+ //
+ // Note: This escape hatch is NOT compatible with the discovery chain and
+ // will be ignored if a discovery chain is active.
+ EnvoyListenerJSON string `json:",omitempty" alias:"envoy_listener_json"`
+
+ // EnvoyClusterJSON is a complete override ("escape hatch") for the upstream's
+ // cluster. The Connect client TLS certificate and context will be injected
+ // overriding any TLS settings present.
+ //
+ // Note: This escape hatch is NOT compatible with the discovery chain and
+ // will be ignored if a discovery chain is active.
+ EnvoyClusterJSON string `json:",omitempty" alias:"envoy_cluster_json"`
+
+ // Protocol describes the upstream's service protocol. Valid values are "tcp",
+ // "http" and "grpc". Anything else is treated as tcp. The enables protocol
+ // aware features like per-request metrics and connection pooling, tracing,
+ // routing etc.
+ Protocol string `json:",omitempty"`
+
+ // ConnectTimeoutMs is the number of milliseconds to timeout making a new
+ // connection to this upstream. Defaults to 5000 (5 seconds) if not set.
+ ConnectTimeoutMs int `json:",omitempty" alias:"connect_timeout_ms"`
+
+ // Limits are the set of limits that are applied to the proxy for a specific upstream of a
+ // service instance.
+ Limits *UpstreamLimits `json:",omitempty"`
+
+ // PassiveHealthCheck configuration determines how upstream proxy instances will
+ // be monitored for removal from the load balancing pool.
+ PassiveHealthCheck *PassiveHealthCheck `json:",omitempty" alias:"passive_health_check"`
+
+ // MeshGatewayConfig controls how Mesh Gateways are configured and used
+ MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway" `
+}
+
+type PassiveHealthCheck struct {
+ // Interval between health check analysis sweeps. Each sweep may remove
+ // hosts or return hosts to the pool.
+ Interval time.Duration `json:",omitempty"`
+
+ // MaxFailures is the count of consecutive failures that results in a host
+ // being removed from the pool.
+ MaxFailures uint32 `alias:"max_failures"`
+}
+
+// UpstreamLimits describes the limits that are associated with a specific
+// upstream of a service instance.
+type UpstreamLimits struct {
+ // MaxConnections is the maximum number of connections the local proxy can
+ // make to the upstream service.
+ MaxConnections int `alias:"max_connections"`
+
+ // MaxPendingRequests is the maximum number of requests that will be queued
+ // waiting for an available connection. This is mostly applicable to HTTP/1.1
+ // clusters since all HTTP/2 requests are streamed over a single
+ // connection.
+ MaxPendingRequests int `alias:"max_pending_requests"`
+
+ // MaxConcurrentRequests is the maximum number of in-flight requests that will be allowed
+ // to the upstream cluster at a point in time. This is mostly applicable to HTTP/2
+ // clusters since all HTTP/1.1 requests are limited by MaxConnections.
+ MaxConcurrentRequests int `alias:"max_concurrent_requests"`
+}
+
type ServiceConfigEntry struct {
- Kind string
- Name string
- Namespace string `json:",omitempty"`
- Protocol string `json:",omitempty"`
- MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"`
- Expose ExposeConfig `json:",omitempty"`
- ExternalSNI string `json:",omitempty" alias:"external_sni"`
- Meta map[string]string `json:",omitempty"`
- CreateIndex uint64
- ModifyIndex uint64
+ Kind string
+ Name string
+ Namespace string `json:",omitempty"`
+ Protocol string `json:",omitempty"`
+ MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"`
+ Connect ConnectConfiguration `json:",omitempty"`
+ Expose ExposeConfig `json:",omitempty"`
+ TransparentProxy bool `json:",omitempty" alias:"transparent_proxy"`
+ ExternalSNI string `json:",omitempty" alias:"external_sni"`
+ Meta map[string]string `json:",omitempty"`
+ CreateIndex uint64
+ ModifyIndex uint64
}
func (s *ServiceConfigEntry) GetKind() string {
@@ -129,15 +206,16 @@ func (s *ServiceConfigEntry) GetModifyIndex() uint64 {
}
type ProxyConfigEntry struct {
- Kind string
- Name string
- Namespace string `json:",omitempty"`
- Config map[string]interface{} `json:",omitempty"`
- MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"`
- Expose ExposeConfig `json:",omitempty"`
- Meta map[string]string `json:",omitempty"`
- CreateIndex uint64
- ModifyIndex uint64
+ Kind string
+ Name string
+ Namespace string `json:",omitempty"`
+ Config map[string]interface{} `json:",omitempty"`
+ MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"`
+ Expose ExposeConfig `json:",omitempty"`
+ TransparentProxy bool `json:",omitempty" alias:"transparent_proxy"`
+ Meta map[string]string `json:",omitempty"`
+ CreateIndex uint64
+ ModifyIndex uint64
}
func (p *ProxyConfigEntry) GetKind() string {
diff --git a/vendor/github.com/hashicorp/consul/api/connect_ca.go b/vendor/github.com/hashicorp/consul/api/connect_ca.go
index 26a7bfb1d..9d2c26602 100644
--- a/vendor/github.com/hashicorp/consul/api/connect_ca.go
+++ b/vendor/github.com/hashicorp/consul/api/connect_ca.go
@@ -23,6 +23,14 @@ type CAConfig struct {
// configuration is an error.
State map[string]string
+ // ForceWithoutCrossSigning indicates that the CA reconfiguration should go
+ // ahead even if the current CA is unable to cross sign certificates. This
+ // risks temporary connection failures during the rollout as new leafs will be
+ // rejected by proxies that have not yet observed the new root cert but is the
+ // only option if a CA that doesn't support cross signing needs to be
+ // reconfigured or mirated away from.
+ ForceWithoutCrossSigning bool
+
CreateIndex uint64
ModifyIndex uint64
}
diff --git a/vendor/github.com/hashicorp/consul/api/go.sum b/vendor/github.com/hashicorp/consul/api/go.sum
index 57ef54399..0ee1dc2cd 100644
--- a/vendor/github.com/hashicorp/consul/api/go.sum
+++ b/vendor/github.com/hashicorp/consul/api/go.sum
@@ -83,6 +83,7 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go
index 99b9ac257..a51d41a87 100644
--- a/vendor/github.com/hashicorp/consul/api/health.go
+++ b/vendor/github.com/hashicorp/consul/api/health.go
@@ -58,6 +58,7 @@ type HealthCheckDefinition struct {
Header map[string][]string
Method string
Body string
+ TLSServerName string
TLSSkipVerify bool
TCP string
IntervalDuration time.Duration `json:"-"`
diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go
index 57876ee9f..92e2c91ca 100644
--- a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go
+++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go
@@ -334,10 +334,23 @@ func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *W
func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
r.setQueryOptions(q)
- _, resp, err := requireOK(op.c.doRequest(r))
+
+ // we cannot just use requireOK because this endpoint might use a 429 status to indicate
+ // that unhealthiness
+ _, resp, err := op.c.doRequest(r)
if err != nil {
+ if resp != nil {
+ resp.Body.Close()
+ }
return nil, err
}
+
+ // these are the only 2 status codes that would indicate that we should
+ // expect the body to contain the right format.
+ if resp.StatusCode != 200 && resp.StatusCode != 429 {
+ return nil, generateUnexpectedResponseCodeError(resp)
+ }
+
defer resp.Body.Close()
var out OperatorHealthReply
diff --git a/vendor/github.com/hashicorp/nomad/api/consul.go b/vendor/github.com/hashicorp/nomad/api/consul.go
new file mode 100644
index 000000000..64e085e61
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/api/consul.go
@@ -0,0 +1,35 @@
+package api
+
+// Consul represents configuration related to consul.
+type Consul struct {
+ // (Enterprise-only) Namespace represents a Consul namespace.
+ Namespace string `mapstructure:"namespace" hcl:"namespace,optional"`
+}
+
+// Canonicalize Consul into a canonical form. The Canonicalize structs containing
+// a Consul should ensure it is not nil.
+func (c *Consul) Canonicalize() {
+ // Nothing to do here.
+ //
+ // If Namespace is nil, that is a choice of the job submitter that
+ // we should inherit from higher up (i.e. job<-group). Likewise, if
+ // Namespace is set but empty, that is a choice to use the default consul
+ // namespace.
+}
+
+// Copy creates a deep copy of c.
+func (c *Consul) Copy() *Consul {
+ return &Consul{
+ Namespace: c.Namespace,
+ }
+}
+
+// MergeNamespace sets Namespace to namespace if not already configured.
+// This is used to inherit the job-level consul_namespace if the group-level
+// namespace is not explicitly configured.
+func (c *Consul) MergeNamespace(namespace *string) {
+ // only inherit namespace from above if not already set
+ if c.Namespace == "" && namespace != nil {
+ c.Namespace = *namespace
+ }
+}
diff --git a/vendor/github.com/hashicorp/nomad/api/jobs.go b/vendor/github.com/hashicorp/nomad/api/jobs.go
index d65544519..1fed52c7c 100644
--- a/vendor/github.com/hashicorp/nomad/api/jobs.go
+++ b/vendor/github.com/hashicorp/nomad/api/jobs.go
@@ -817,6 +817,7 @@ type Job struct {
ParentID *string
Dispatched bool
Payload []byte
+ ConsulNamespace *string `mapstructure:"consul_namespace"`
VaultNamespace *string `mapstructure:"vault_namespace"`
NomadTokenID *string `mapstructure:"nomad_token_id"`
Status *string
@@ -878,6 +879,9 @@ func (j *Job) Canonicalize() {
if j.ConsulToken == nil {
j.ConsulToken = stringToPtr("")
}
+ if j.ConsulNamespace == nil {
+ j.ConsulNamespace = stringToPtr("")
+ }
if j.VaultToken == nil {
j.VaultToken = stringToPtr("")
}
diff --git a/vendor/github.com/hashicorp/nomad/api/tasks.go b/vendor/github.com/hashicorp/nomad/api/tasks.go
index b5c6ba119..9757518b9 100644
--- a/vendor/github.com/hashicorp/nomad/api/tasks.go
+++ b/vendor/github.com/hashicorp/nomad/api/tasks.go
@@ -430,6 +430,7 @@ type TaskGroup struct {
ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
StopAfterClientDisconnect *time.Duration `mapstructure:"stop_after_client_disconnect" hcl:"stop_after_client_disconnect,optional"`
Scaling *ScalingPolicy `hcl:"scaling,block"`
+ Consul *Consul `hcl:"consul,block"`
}
// NewTaskGroup creates a new TaskGroup.
@@ -462,6 +463,13 @@ func (g *TaskGroup) Canonicalize(job *Job) {
g.EphemeralDisk.Canonicalize()
}
+ // Merge job.consul onto group.consul
+ if g.Consul == nil {
+ g.Consul = new(Consul)
+ }
+ g.Consul.MergeNamespace(job.ConsulNamespace)
+ g.Consul.Canonicalize()
+
// Merge the update policy from the job
if ju, tu := job.Update != nil, g.Update != nil; ju && tu {
// Merge the jobs and task groups definition of the update strategy
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d8867a6a0..4bdd7ddab 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -364,7 +364,7 @@ github.com/hashicorp/consul-template/signals
github.com/hashicorp/consul-template/template
github.com/hashicorp/consul-template/version
github.com/hashicorp/consul-template/watch
-# github.com/hashicorp/consul/api v1.8.1
+# github.com/hashicorp/consul/api v1.8.1 => github.com/hashicorp/consul/api v1.4.1-0.20210319180826-cd1cd4febd26
## explicit
github.com/hashicorp/consul/api
# github.com/hashicorp/consul/sdk v0.7.0
@@ -1048,6 +1048,7 @@ honnef.co/go/tools/version
# github.com/apparentlymart/go-textseg/v12 => github.com/apparentlymart/go-textseg/v12 v12.0.0
# github.com/godbus/dbus => github.com/godbus/dbus v5.0.1+incompatible
# github.com/golang/protobuf => github.com/golang/protobuf v1.3.4
+# github.com/hashicorp/consul/api => github.com/hashicorp/consul/api v1.4.1-0.20210319180826-cd1cd4febd26
# github.com/hashicorp/go-discover => github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f
# github.com/hashicorp/hcl => github.com/hashicorp/hcl v1.0.1-0.20201016140508-a07e7d50bbee
# github.com/hashicorp/nomad/api => ./api
diff --git a/website/content/docs/commands/job/run.mdx b/website/content/docs/commands/job/run.mdx
index 567adcc43..d2ea4fa64 100644
--- a/website/content/docs/commands/job/run.mdx
+++ b/website/content/docs/commands/job/run.mdx
@@ -83,6 +83,13 @@ that volume.
storing it in the job file. This overrides the token found in the
`$CONSUL_HTTP_TOKEN` environment variable and that found in the job.
+- `-consul-namespace`: If set, any services in the job will be registered into the
+ specified Consul namespace. Any `template` stanza reading from Consul KV will
+ be scoped to the specified Consul namespace. If Consul ACLs are enabled and the
+ [allow_unauthenticated] Nomad server Consul configuration is not enabled, then
+ a Consul token must be supplied with appropriate service and kv Consul ACL policy
+ permissions.
+
- `-vault-token`: If set, the passed Vault token is stored in the job before
sending to the Nomad servers. This allows passing the Vault token without
storing it in the job file. This overrides the token found in the
@@ -150,3 +157,4 @@ $ nomad job run failing.nomad
[`job plan` command]: /docs/commands/job/plan
[eval status]: /docs/commands/eval-status
[job specification]: /docs/job-specification
+[allow_unauthenticated]: /docs/configuration/consul#allow_unauthenticated
diff --git a/website/content/docs/integrations/consul-integration.mdx b/website/content/docs/integrations/consul-integration.mdx
index 4d44427fa..7e4678870 100644
--- a/website/content/docs/integrations/consul-integration.mdx
+++ b/website/content/docs/integrations/consul-integration.mdx
@@ -51,6 +51,56 @@ general configurations within a Nomad task.
For more information on Nomad's template stanza and how it leverages Consul Template,
please see the [`template` job specification documentation](/docs/job-specification/template).
+## Consul Namespaces
+
+Nomad provides integration with [Consul Namespaces][consul_namespaces] for service
+registrations specified in `service` blocks and Consul KV reads in `template`
+blocks.
+
+By default, Nomad will not specify a Consul namespace on service registrations or
+KV store reads, which Consul then implicitly resolves to the `"default"` namespace.
+This default namespace behavior can be modified by setting the [`namespace`][consul_agent_namespace] field
+in the Nomad agent Consul configuration block.
+
+For more control over Consul namespaces, Nomad Enterprise supports configuring the
+Consul namespace at the [group level][consul_group_namespace] in the Nomad job spec
+as well as the [`-consul-namespace`][consul_run_namespace] command line argument for `job run`.
+
+The Consul namespace used for a set of group or task service registrations within
+a group, as well as `template` KV store access, is determined from the following
+hierarchy from lowest to highest precedence:
+
+- consul default: If no Consul namespace options are configured, Consul will automatically
+ make use of the `"default"` namespace.
+
+- agent configuration: If the [`namespace`][consul_agent_namespace] Nomad agent
+ Consul configuration parameter is set, this namespace will be used instead of
+ the Consul default.
+
+- job run command: If the [`-consul-namespace`][consul_run_namespace]
+ command line argument is specified on job submission, this namespace will take
+ precedence over the namespace set in Nomad agent configuration.
+
+- group configuration: If the [group level][consul_group_namespace]
+ Consul namespace is configured, this namespace will take precedence over all other
+ options.
+
+If [Consul ACLs][consul_acls] are enabled, the [allow_unauthenticated] configuration parameter
+will control whether a Consul token will be required when submitting a job with
+Consul namespace configured. The provided Consul token must belong to the correct
+namespace, and must be backed by a Consul ACL Policy with sufficient `service:write`
+and `kv:read` permissions. An example policy might look like:
+
+```hcl
+key_prefix "" {
+ policy = "read"
+}
+
+service_prefix "" {
+ policy = "write"
+}
+```
+
## Assumptions
- Consul 0.7.2 or later is needed for `tls_skip_verify` in HTTP checks.
@@ -71,5 +121,11 @@ please see the [`template` job specification documentation](/docs/job-specificat
way is to run the container in the host networking mode, or make the Consul
agent listen on an interface in the network namespace of the container.
+[allow_unauthenticated]: /docs/configuration/consul#allow_unauthenticated
[consul]: https://www.consul.io/ 'Consul by HashiCorp'
+[consul_acls]: https://www.consul.io/docs/security/acl
+[consul_namespaces]: https://www.consul.io/docs/enterprise/namespaces
+[consul_agent_namespace]: /docs/configuration/consul#namespace
+[consul_group_namespace]: /docs/job-specification/group#namespace
+[consul_run_namespace]: /docs/commands/job/run#consul-namespace
[service]: /docs/job-specification/service 'Nomad service Job Specification'
diff --git a/website/content/docs/job-specification/group.mdx b/website/content/docs/job-specification/group.mdx
index f93cda6a2..509124a2a 100644
--- a/website/content/docs/job-specification/group.mdx
+++ b/website/content/docs/job-specification/group.mdx
@@ -40,6 +40,9 @@ job "docs" {
`min` value specified in the [`scaling`](/docs/job-specification/scaling)
block, if present; otherwise, this defaults to `1`.
+- `consul` ([Consul][consul]: nil) - Specifies Consul configuration
+ options specific to the group.
+
- `ephemeral_disk` ([EphemeralDisk][]: nil) - Specifies the
ephemeral disk requirements of the group. Ephemeral disks can be marked as
sticky and support live data migrations.
@@ -103,6 +106,14 @@ job "docs" {
- `volume` ([Volume][]: nil) - Specifies the volumes that are
required by tasks within the group.
+### `consul` Parameters
+
+- `namespace` `(string: "")` - The Consul namespace in which
+ group and task-level services within the group will be registered. Use of
+ `template` to access Consul KV will read from the specified Consul namespace.
+ Specifying `namespace` takes precedence over the [`-consul-namespace`][consul_namespace]
+ command line argument in `job run`.
+
## `group` Examples
The following examples only show the `group` stanzas. Remember that the
@@ -244,6 +255,8 @@ group "second" {
[task]: /docs/job-specification/task 'Nomad task Job Specification'
[job]: /docs/job-specification/job 'Nomad job Job Specification'
[constraint]: /docs/job-specification/constraint 'Nomad constraint Job Specification'
+[consul]: /docs/job-specification/group#consul-parameters
+[consul_namespace]: /docs/commands/job/run#consul-namespace
[spread]: /docs/job-specification/spread 'Nomad spread Job Specification'
[affinity]: /docs/job-specification/affinity 'Nomad affinity Job Specification'
[ephemeraldisk]: /docs/job-specification/ephemeral_disk 'Nomad ephemeral_disk Job Specification'