consul: support admin partitions (#19665)
Add support for Consul Enterprise admin partitions. We added fingerprinting in https://github.com/hashicorp/nomad/pull/19485. This PR adds a `consul.partition` field. The expectation is that most users will create a mapping of Nomad node pools to Consul admin partitions, but whenever `partition` is set we also add an implicit constraint on the fingerprinted value. Fixes: https://github.com/hashicorp/nomad/issues/13139
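As a rough sketch of that expected usage (all names here are hypothetical, not from this PR): a job can pin a node pool whose clients run Consul Enterprise agents in a given admin partition and set the matching `consul.partition`, which injects the implicit `${attr.consul.partition}` constraint:

```hcl
job "web" {
  # hypothetical node pool whose clients are fingerprinted
  # with consul.partition = "prod"
  node_pool = "consul-prod"

  group "app" {
    consul {
      # Nomad injects the constraint ${attr.consul.partition} = "prod"
      partition = "prod"
    }

    task "server" {
      driver = "docker"

      config {
        image = "nginx:1.25" # placeholder workload
      }
    }
  }
}
```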
.changelog/19665.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
```release-note:improvement
consul: Added support for Consul Enterprise admin partitions
```
@@ -16,6 +16,10 @@ type Consul struct {

    // (Enterprise-only) Cluster represents a specific Consul cluster.
    Cluster string `mapstructure:"cluster" hcl:"cluster,optional"`

    // Partition is the Consul admin partition where the workload should
    // run. This is available in Nomad CE but only works with Consul ENT
    Partition string `mapstructure:"partition" hcl:"partition,optional"`
}

// Canonicalize Consul into a canonical form. The Canonicalize structs containing
@@ -29,6 +33,9 @@ func (c *Consul) Canonicalize() {
    // we should inherit from higher up (i.e. job<-group). Likewise, if
    // Namespace is set but empty, that is a choice to use the default consul
    // namespace.

    // Partition should never be defaulted to "default" because non-ENT Consul
    // clusters don't have admin partitions
}

// Copy creates a deep copy of c.
@@ -36,6 +43,7 @@ func (c *Consul) Copy() *Consul {
    return &Consul{
        Namespace: c.Namespace,
        Cluster:   c.Cluster,
        Partition: c.Partition,
    }
}
@@ -1942,6 +1942,7 @@ func apiConsulToStructs(in *api.Consul) *structs.Consul {
    return &structs.Consul{
        Namespace: in.Namespace,
        Cluster:   in.Cluster,
        Partition: in.Partition,
    }
}
@@ -3,6 +3,12 @@

package nomad

import (
    "fmt"

    "github.com/hashicorp/nomad/nomad/structs"
)

// jobConsulHook is a job registration admission controller for Consul
// configuration in Consul, Service, and Template blocks
type jobConsulHook struct {
@@ -12,3 +18,15 @@ type jobConsulHook struct {
func (jobConsulHook) Name() string {
    return "consul"
}

// validateTaskPartitionMatchesGroup validates that any partition set for the
// task.Consul matches any partition set for the group
func (jobConsulHook) validateTaskPartitionMatchesGroup(groupPartition string, taskConsul *structs.Consul) error {
    if taskConsul.Partition == "" || groupPartition == "" {
        return nil
    }
    if taskConsul.Partition != groupPartition {
        return fmt.Errorf("task.consul.partition %q must match group.consul.partition %q if both are set", taskConsul.Partition, groupPartition)
    }
    return nil
}
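For illustration, a jobspec sketch (hypothetical names) that passes this check because the group-level and task-level partitions agree; differing values would fail job submission with the error above:

```hcl
group "app" {
  consul {
    partition = "team-a" # hypothetical partition
  }

  task "web" {
    consul {
      # must equal the group's partition when both are set
      partition = "team-a"
    }
  }
}
```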
@@ -28,7 +28,11 @@ func (h jobConsulHook) Validate(job *structs.Job) ([]error, error) {
    }

    for _, group := range job.TaskGroups {

        groupPartition := ""

        if group.Consul != nil {
            groupPartition = group.Consul.Partition
            if err := h.validateCluster(group.Consul.Cluster); err != nil {
                return nil, err
            }
@@ -56,6 +60,11 @@ func (h jobConsulHook) Validate(job *structs.Job) ([]error, error) {
        }

        if task.Consul != nil {
            err := h.validateTaskPartitionMatchesGroup(groupPartition, task.Consul)
            if err != nil {
                return nil, err
            }

            if err := h.validateCluster(task.Consul.Cluster); err != nil {
                return nil, err
            }
@@ -87,12 +96,26 @@ func (h jobConsulHook) validateCluster(name string) error {
    return nil
}

func consulPartitionConstraint(partition string) *structs.Constraint {
    return &structs.Constraint{
        LTarget: "${attr.consul.partition}",
        RTarget: partition,
        Operand: "=",
    }
}

// Mutate ensures that the job's Consul cluster has been configured to be the
// default Consul cluster if unset
func (j jobConsulHook) Mutate(job *structs.Job) (*structs.Job, []error, error) {
    for _, group := range job.TaskGroups {
-       if group.Consul != nil && group.Consul.Cluster == "" {
-           group.Consul.Cluster = structs.ConsulDefaultCluster
+       if group.Consul != nil {
+           if group.Consul.Cluster == "" {
+               group.Consul.Cluster = structs.ConsulDefaultCluster
+           }
+           if group.Consul.Partition != "" {
+               group.Constraints = append(group.Constraints,
+                   consulPartitionConstraint(group.Consul.Partition))
+           }
        }

        for _, service := range group.Services {
@@ -102,8 +125,14 @@ func (j jobConsulHook) Mutate(job *structs.Job) (*structs.Job, []error, error) {
        }

        for _, task := range group.Tasks {
-           if task.Consul != nil && task.Consul.Cluster == "" {
-               task.Consul.Cluster = structs.ConsulDefaultCluster
+           if task.Consul != nil {
+               if task.Consul.Cluster == "" {
+                   task.Consul.Cluster = structs.ConsulDefaultCluster
+               }
+               if task.Consul.Partition != "" {
+                   task.Constraints = append(task.Constraints,
+                       consulPartitionConstraint(task.Consul.Partition))
+               }
            }

            for _, service := range task.Services {
                if service.IsConsul() && service.Cluster == "" {
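In jobspec terms, the constraint this helper injects is equivalent to writing the following by hand (the value here is hypothetical; it is taken from the group's or task's `consul.partition`):

```hcl
constraint {
  attribute = "${attr.consul.partition}"
  operator  = "="
  value     = "team-a" # hypothetical partition name
}
```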
@@ -39,6 +39,9 @@ func TestJobEndpointHook_ConsulCE(t *testing.T) {
    job.TaskGroups[0].Services[0].Cluster = ""
    job.TaskGroups[0].Services[1].Cluster = "infra"

    // assign to a specific partition
    job.TaskGroups[0].Consul = &structs.Consul{Partition: "foo"}

    hook := jobConsulHook{srv}

    _, _, err := hook.Mutate(job)
@@ -48,6 +51,13 @@ func TestJobEndpointHook_ConsulCE(t *testing.T) {
    test.Eq(t, "infra", job.TaskGroups[0].Services[1].Cluster)
    test.Eq(t, "nondefault", job.TaskGroups[0].Tasks[0].Services[0].Cluster)

    test.SliceContains(t, job.TaskGroups[0].Constraints,
        &structs.Constraint{
            LTarget: "${attr.consul.partition}",
            RTarget: "foo",
            Operand: "=",
        })

    _, err = hook.Validate(job)
    must.EqError(t, err, "non-default Consul cluster requires Nomad Enterprise")
}
@@ -33,6 +33,11 @@ type Consul struct {

    // Cluster (by name) to send API requests to
    Cluster string

    // Partition is the Consul admin partition where the workload should
    // run. Note that this should never be defaulted to "default" because
    // non-ENT Consul clusters don't have admin partitions
    Partition string
}

// Copy the Consul block.
@@ -43,6 +48,7 @@ func (c *Consul) Copy() *Consul {
    return &Consul{
        Namespace: c.Namespace,
        Cluster:   c.Cluster,
        Partition: c.Partition,
    }
}
@@ -57,6 +63,9 @@ func (c *Consul) Equal(o *Consul) bool {
    if c.Cluster != o.Cluster {
        return false
    }
    if c.Partition != o.Partition {
        return false
    }

    return true
}
@@ -2160,6 +2160,8 @@ func TestTaskGroupDiff(t *testing.T) {
    New: &TaskGroup{
        Consul: &Consul{
            Namespace: "team2",
            Cluster:   "us-east-1",
            Partition: "us-east-1a",
        },
    },
    Expected: &TaskGroupDiff{
@@ -2169,12 +2171,24 @@ func TestTaskGroupDiff(t *testing.T) {
    Type: DiffTypeEdited,
    Name: "Consul",
    Fields: []*FieldDiff{
        {
            Type: DiffTypeAdded,
            Name: "Cluster",
            Old:  "",
            New:  "us-east-1",
        },
        {
            Type: DiffTypeEdited,
            Name: "Namespace",
            Old:  "team1",
            New:  "team2",
        },
        {
            Type: DiffTypeAdded,
            Name: "Partition",
            Old:  "",
            New:  "us-east-1a",
        },
    },
},
@@ -384,6 +384,10 @@ func consulUpdated(consulA, consulB *structs.Consul) comparison {
        if a, b := consulA.Cluster, consulB.Cluster; a != b {
            return difference("consul cluster", a, b)
        }

        if a, b := consulA.Partition, consulB.Partition; a != b {
            return difference("consul partition", a, b)
        }
    }

    return same
@@ -28,6 +28,7 @@ job "docs" {
|
||||
consul {
|
||||
cluster = "default"
|
||||
namespace = "default"
|
||||
partition = "default"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -95,6 +96,13 @@ The [`template`][template] block can use the Consul token as well.
  namespace. Specifying `namespace` takes precedence over the
  [`-consul-namespace`][flag_consul_namespace] command line argument in `job run`.

- `partition` `(string: "")` - When this field is set, a constraint will be
  added to the group or task to ensure that the allocation is placed on a Nomad
  client that has a Consul Enterprise agent in the specified Consul [admin
  partition][]. Note that Consul Community Edition agents are not assigned to
  any admin partition, so this field should not be used without Consul
  Enterprise.

## `consul` Examples

The following examples only show the `consul` blocks or other relevant
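A minimal usage sketch of the new field in the style of that examples section (partition name hypothetical; requires clients whose Consul Enterprise agents belong to that partition):

```hcl
job "docs" {
  group "example" {
    consul {
      partition = "prod-east" # hypothetical admin partition
    }
  }
}
```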
@@ -238,3 +246,4 @@ job "docs" {
|
||||
[`consul.name`]: /nomad/docs/configuration/consul#name
|
||||
[flag_consul_namespace]: /nomad/docs/commands/job/run#consul-namespace
|
||||
[Connect]: /nomad/docs/job-specification/connect
|
||||
[admin partition]: /consul/docs/enterprise/admin-partitions
|
||||
|
||||