E2E: use a self-hosted Consul for easier WI testing (#20256)

Our `consulcompat` tests exercise both the Workload Identity and legacy Consul
token workflow, but they are limited to running single node tests. The E2E
cluster is network isolated, so using our HCP Consul cluster runs into a
problem validating WI tokens because it can't reach the JWKS endpoint. In real
production environments, you'd solve this with a CNAME pointing to a public IP
pointing to a proxy with a real domain name. But that's logistically
impractical for our ephemeral nightly cluster.

Migrate the HCP Consul to a single-node Consul cluster on AWS EC2 alongside our
Nomad cluster. Bootstrap TLS and ACLs in Terraform and ensure all nodes can
reach each other. This will allow us to update our Consul tests so they can use
Workload Identity, in a separate PR.

Ref: #19698
This commit is contained in:
Tim Gross
2024-04-02 15:24:51 -04:00
committed by GitHub
parent 31f53cec01
commit cf25cf5cd5
24 changed files with 564 additions and 184 deletions

View File

@@ -4,10 +4,10 @@ This folder contains Terraform resources for provisioning a Nomad
cluster on EC2 instances on AWS to use as the target of end-to-end
tests.
Terraform provisions the AWS infrastructure assuming that EC2 AMIs
have already been built via Packer and HCP Consul and HCP Vault
clusters are already running. It deploys a build of Nomad from your
local machine along with configuration files.
Terraform provisions the AWS infrastructure assuming that EC2 AMIs have already
been built via Packer and an HCP Vault cluster is already running. It deploys a
build of Nomad from your local machine along with configuration files, as well
as a single-node Consul server cluster.
## Setup
@@ -30,8 +30,6 @@ team's vault under `nomad-e2e`.
```
export HCP_CLIENT_ID=
export HCP_CLIENT_SECRET=
export CONSUL_HTTP_TOKEN=
export CONSUL_HTTP_ADDR=
```
The Vault admin token will expire after 6 hours. If you haven't
@@ -57,6 +55,8 @@ client_count_ubuntu_jammy_amd64 = "4"
client_count_windows_2016_amd64 = "1"
```
You will also need a Consul Enterprise license file to exercise Enterprise
features; if you don't provide one, Terraform writes a harmless empty license file.
Optionally, edit the `nomad_local_binary` variable in the
`terraform.tfvars` file to change the path to the local binary of
Nomad you'd like to upload.

View File

@@ -58,6 +58,23 @@ resource "aws_instance" "client_windows_2016_amd64" {
}
}
# Single-node Consul server host. The Consul agents on the Nomad servers and
# clients discover it via AWS cloud auto-join on the ConsulAutoJoin tag below
# (see retry_join in the clients.hcl/servers.hcl templates).
resource "aws_instance" "consul_server" {
  ami                    = data.aws_ami.ubuntu_jammy_amd64.image_id
  instance_type          = var.instance_type
  key_name               = module.keys.key_name
  vpc_security_group_ids = [aws_security_group.consul_server.id]
  iam_instance_profile   = data.aws_iam_instance_profile.nomad_e2e_cluster.name
  availability_zone      = var.availability_zone

  # Instance tags
  tags = {
    Name           = "${local.random_name}-consul-server-ubuntu-jammy-amd64"
    ConsulAutoJoin = "auto-join-${local.random_name}"
    User           = data.aws_caller_identity.current.arn
  }
}
data "external" "packer_sha" {
program = ["/bin/sh", "-c", <<EOT
sha=$(git log -n 1 --pretty=format:%H packer)

View File

@@ -0,0 +1,81 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
# consul-clients.tf produces the TLS certificates and configuration files for
# the Consul agents running on the Nomad server and client nodes
# TLS certs for the Consul agents

# One shared key pair for every Consul agent (on both the Nomad server and
# client nodes).
resource "tls_private_key" "consul_agents" {
  algorithm   = "ECDSA"
  ecdsa_curve = "P384"
}

resource "tls_cert_request" "consul_agents" {
  private_key_pem = tls_private_key.consul_agents.private_key_pem

  subject {
    common_name = "${local.random_name} Consul agent"
  }
}

# Agent certificate signed by the cluster's locally-generated CA; valid for
# 720h (30 days), which comfortably outlives an ephemeral nightly cluster.
# NOTE(review): allowed_uses only includes client_auth here, but the agents
# expose TLS listeners of their own (grpc_tls in clients.hcl) and would then
# present this cert as a server — confirm whether server_auth is also needed,
# as it is for the consul_server cert below.
resource "tls_locally_signed_cert" "consul_agents" {
  cert_request_pem   = tls_cert_request.consul_agents.cert_request_pem
  ca_private_key_pem = tls_private_key.ca.private_key_pem
  ca_cert_pem        = tls_self_signed_cert.ca.cert_pem

  validity_period_hours = 720

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "client_auth",
  ]
}
# Write the agents' TLS key pair into the shared upload directory so it can be
# shipped to each Nomad server/client node. Pin file_permission to 0600 for
# consistency with the other sensitive files produced by this configuration
# (the provider would otherwise fall back to its own default mode).
resource "local_sensitive_file" "consul_agents_key" {
  content         = tls_private_key.consul_agents.private_key_pem
  filename        = "uploads/shared/consul.d/agent_cert.key.pem"
  file_permission = "0600"
}

resource "local_sensitive_file" "consul_agents_cert" {
  content         = tls_locally_signed_cert.consul_agents.cert_pem
  filename        = "uploads/shared/consul.d/agent_cert.pem"
  file_permission = "0600"
}
# Consul tokens for the Consul agents

# Random secret used as the Consul agents' ACL token; the matching server-side
# token is created by scripts/bootstrap-consul.sh (CONSUL_AGENT_TOKEN).
resource "random_uuid" "consul_agent_token" {}

# Render the agent configuration from the etc/consul.d/clients.hcl template
# with the agent token and the cloud auto-join tag value baked in.
resource "local_sensitive_file" "consul_agent_config_file" {
  content = templatefile("etc/consul.d/clients.hcl", {
    token          = "${random_uuid.consul_agent_token.result}"
    autojoin_value = "auto-join-${local.random_name}"
  })
  filename        = "uploads/shared/consul.d/clients.hcl"
  file_permission = "0600"
}

# Consul tokens for the Nomad agents

# Random secret shared by all Nomad agents; the matching server-side token is
# created by scripts/bootstrap-consul.sh (NOMAD_CLUSTER_CONSUL_TOKEN).
resource "random_uuid" "consul_token_for_nomad" {}

# Nomad client-side consul{} block with the token and per-cluster service names.
resource "local_sensitive_file" "nomad_client_config_for_consul" {
  content = templatefile("etc/nomad.d/client-consul.hcl", {
    token               = "${random_uuid.consul_token_for_nomad.result}"
    client_service_name = "client-${local.random_name}"
    server_service_name = "server-${local.random_name}"
  })
  filename        = "uploads/shared/nomad.d/client-consul.hcl"
  file_permission = "0600"
}

# Nomad server-side consul{} block; same token, rendered from the server template.
resource "local_sensitive_file" "nomad_server_config_for_consul" {
  content = templatefile("etc/nomad.d/server-consul.hcl", {
    token               = "${random_uuid.consul_token_for_nomad.result}"
    client_service_name = "client-${local.random_name}"
    server_service_name = "server-${local.random_name}"
  })
  filename        = "uploads/shared/nomad.d/server-consul.hcl"
  file_permission = "0600"
}

View File

@@ -0,0 +1,178 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
# consul-servers.tf produces the TLS certificates and configuration files for
# the single-node Consul server cluster
# Consul token for bootstrapping the Consul server

# Random secret used as the server's initial ACL management token; it is baked
# into the server config below and written to a local keys/ file so the test
# environment can export it as CONSUL_HTTP_TOKEN.
resource "random_uuid" "consul_initial_management_token" {}

resource "local_sensitive_file" "consul_initial_management_token" {
  content         = random_uuid.consul_initial_management_token.result
  filename        = "keys/consul_initial_management_token"
  file_permission = "0600"
}

# Render the server configuration from the etc/consul.d/servers.hcl template
# with the pre-generated token secrets and the cloud auto-join tag value.
resource "local_sensitive_file" "consul_server_config_file" {
  content = templatefile("etc/consul.d/servers.hcl", {
    management_token = "${random_uuid.consul_initial_management_token.result}"
    token            = "${random_uuid.consul_agent_token.result}"
    nomad_token      = "${random_uuid.consul_token_for_nomad.result}"
    autojoin_value   = "auto-join-${local.random_name}"
  })
  filename        = "uploads/shared/consul.d/servers.hcl"
  file_permission = "0600"
}
# TLS cert for the Consul server

resource "tls_private_key" "consul_server" {
  algorithm   = "ECDSA"
  ecdsa_curve = "P384"
}

# SANs cover the public IP (used by the test runner and the local-exec
# bootstrap script), the private IP (used by agents inside the VPC),
# loopback, and a fixed DNS name.
resource "tls_cert_request" "consul_server" {
  private_key_pem = tls_private_key.consul_server.private_key_pem
  ip_addresses    = [aws_instance.consul_server.public_ip, aws_instance.consul_server.private_ip, "127.0.0.1"]
  dns_names       = ["server.consul.global"]

  subject {
    common_name = "${local.random_name} Consul server"
  }
}

# Signed by the same locally-generated CA as the rest of the cluster; valid
# for 720h (30 days). server_auth + client_auth because Consul servers both
# serve TLS and dial out to other agents.
resource "tls_locally_signed_cert" "consul_server" {
  cert_request_pem   = tls_cert_request.consul_server.cert_request_pem
  ca_private_key_pem = tls_private_key.ca.private_key_pem
  ca_cert_pem        = tls_self_signed_cert.ca.cert_pem

  validity_period_hours = 720

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "client_auth",
    "server_auth",
  ]
}
# Write the server's TLS key pair into the shared upload directory. Pin
# file_permission to 0600 for consistency with the other sensitive files
# produced by this configuration (the provider would otherwise fall back to
# its own default mode).
resource "local_sensitive_file" "consul_server_key" {
  content         = tls_private_key.consul_server.private_key_pem
  filename        = "uploads/shared/consul.d/server_cert.key.pem"
  file_permission = "0600"
}

resource "local_sensitive_file" "consul_server_cert" {
  content         = tls_locally_signed_cert.consul_server.cert_pem
  filename        = "uploads/shared/consul.d/server_cert.pem"
  file_permission = "0600"
}

# if consul_license is unset, it'll be a harmless empty license file
resource "local_sensitive_file" "consul_environment" {
  content = templatefile("etc/consul.d/.environment", {
    license = var.consul_license
  })
  filename        = "uploads/shared/consul.d/.environment"
  file_permission = "0600"
}
# Upload the CA certificate, server TLS materials, rendered server config,
# license environment file, and systemd unit to the Consul server over SSH.
# Everything lands in /tmp first; install_consul_server_configs then moves the
# files into place with sudo.
resource "null_resource" "upload_consul_server_configs" {
  depends_on = [
    local_sensitive_file.ca_cert,
    local_sensitive_file.consul_server_config_file,
    local_sensitive_file.consul_server_key,
    local_sensitive_file.consul_server_cert,
    local_sensitive_file.consul_environment,
  ]

  connection {
    type            = "ssh"
    user            = "ubuntu"
    host            = aws_instance.consul_server.public_ip
    port            = 22
    private_key     = file("${path.root}/keys/${local.random_name}.pem")
    target_platform = "unix"
    timeout         = "15m"
  }

  provisioner "file" {
    source      = "keys/tls_ca.crt"
    destination = "/tmp/consul_ca.pem"
  }

  provisioner "file" {
    source      = "uploads/shared/consul.d/.environment"
    destination = "/tmp/.consul_environment"
  }

  provisioner "file" {
    source      = "uploads/shared/consul.d/server_cert.pem"
    destination = "/tmp/consul_cert.pem"
  }

  provisioner "file" {
    source      = "uploads/shared/consul.d/server_cert.key.pem"
    destination = "/tmp/consul_cert.key.pem"
  }

  provisioner "file" {
    source      = "uploads/shared/consul.d/servers.hcl"
    destination = "/tmp/consul_server.hcl"
  }

  provisioner "file" {
    source      = "etc/consul.d/consul-server.service"
    destination = "/tmp/consul.service"
  }
}
# Move the uploaded Consul files into their final root-owned locations and
# enable + (re)start the consul systemd service.
resource "null_resource" "install_consul_server_configs" {
  depends_on = [
    null_resource.upload_consul_server_configs,
  ]

  connection {
    type            = "ssh"
    user            = "ubuntu"
    host            = aws_instance.consul_server.public_ip
    port            = 22
    private_key     = file("${path.root}/keys/${local.random_name}.pem")
    target_platform = "unix"
    timeout         = "15m"
  }

  provisioner "remote-exec" {
    inline = [
      # start from an empty config dir so only the uploaded files apply
      "sudo rm -rf /etc/consul.d/*",
      "sudo mkdir -p /etc/consul.d/bootstrap",
      "sudo mv /tmp/consul_ca.pem /etc/consul.d/ca.pem",
      "sudo mv /tmp/consul_cert.pem /etc/consul.d/cert.pem",
      "sudo mv /tmp/consul_cert.key.pem /etc/consul.d/cert.key.pem",
      "sudo mv /tmp/consul_server.hcl /etc/consul.d/consul.hcl",
      "sudo mv /tmp/consul.service /etc/systemd/system/consul.service",
      "sudo mv /tmp/.consul_environment /etc/consul.d/.environment",
      "sudo systemctl daemon-reload",
      "sudo systemctl enable consul",
      # restart (not start) so re-running Terraform picks up config changes
      "sudo systemctl restart consul",
    ]
  }
}
# Bootstrapping Consul ACLs:
#
# We can't both bootstrap the ACLs and use the Consul TF provider's
# resource.consul_acl_token in the same Terraform run, because there's no way to
# get the management token into the provider's environment after we bootstrap,
# and we want to pass various tokens in the Nomad and Consul configuration
# files. So we run a bootstrapping script that uses tokens we generate randomly.
# Run the ACL bootstrap script on the machine running Terraform, pointed at
# the server's HTTPS API (port 8501). The script waits for a Consul leader,
# runs `consul acl bootstrap` (tolerating "already bootstrapped"), and creates
# the agent and Nomad-cluster policies/tokens from the secrets generated above.
resource "null_resource" "bootstrap_consul_acls" {
  depends_on = [null_resource.install_consul_server_configs]

  provisioner "local-exec" {
    command = "./scripts/bootstrap-consul.sh"
    environment = {
      CONSUL_HTTP_ADDR           = "https://${aws_instance.consul_server.public_ip}:8501"
      CONSUL_CACERT              = "keys/tls_ca.crt"
      CONSUL_HTTP_TOKEN          = "${random_uuid.consul_initial_management_token.result}"
      CONSUL_AGENT_TOKEN         = "${random_uuid.consul_agent_token.result}"
      NOMAD_CLUSTER_CONSUL_TOKEN = "${random_uuid.consul_token_for_nomad.result}"
    }
  }
}

View File

@@ -0,0 +1 @@
CONSUL_LICENSE=${license}

View File

@@ -1,8 +0,0 @@
{
"acl": {
"tokens": {
"agent": "${token}",
"default": "${token}"
}
}
}

View File

@@ -0,0 +1,42 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
# Consul agent (client mode) configuration template for the Nomad server and
# client nodes. ${token} and ${autojoin_value} are filled in by Terraform's
# templatefile(); {{ GetPrivateIP }} is a go-sockaddr template that Consul
# itself evaluates at startup.
log_level      = "DEBUG"
data_dir       = "/opt/consul/data"
bind_addr      = "{{ GetPrivateIP }}"
advertise_addr = "{{ GetPrivateIP }}"
client_addr    = "0.0.0.0"

server = false

acl {
  enabled = true
  tokens {
    # same pre-generated secret for the agent and default tokens; the matching
    # ACL token is created by scripts/bootstrap-consul.sh
    agent   = "${token}"
    default = "${token}"
  }
}

# cloud auto-join on the EC2 tag Terraform sets on the Consul server instance
retry_join = ["provider=aws tag_key=ConsulAutoJoin tag_value=${autojoin_value}"]

tls {
  defaults {
    ca_file   = "/etc/consul.d/ca.pem"
    cert_file = "/etc/consul.d/cert.pem"
    key_file  = "/etc/consul.d/cert.key.pem"
  }
}

connect {
  enabled = true
}

service {
  name = "consul"
}

ports {
  grpc     = 8502
  grpc_tls = 8503
  dns      = 8600
}

View File

@@ -1,12 +0,0 @@
{
"data_dir": "/opt/consul/data",
"bind_addr": "0.0.0.0",
"client_addr": "0.0.0.0",
"advertise_addr": "{{ GetPrivateIP }}",
"connect": {
"enabled": true
},
"ports": {
"grpc": 8502
}
}

View File

@@ -0,0 +1,20 @@
# systemd unit for the single-node Consul server. The optional Enterprise
# license is injected via /etc/consul.d/.environment (CONSUL_LICENSE).
[Unit]
Description=Consul Server
Documentation=https://developer.hashicorp.com/consul/docs
Requires=network-online.target
After=network-online.target
# refuse to start until the rendered config has been installed
ConditionFileNotEmpty=/etc/consul.d/consul.hcl

[Service]
# leading "-" makes a missing environment file non-fatal
EnvironmentFile=-/etc/consul.d/.environment
User=consul
Group=consul
ExecStart=/usr/bin/consul agent -config-dir=/etc/consul.d/
# SIGHUP triggers Consul's config reload
ExecReload=/bin/kill --signal HUP $MAINPID
KillMode=process
KillSignal=SIGTERM
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,47 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
# Consul server configuration template. ${management_token}, ${token}, and
# ${autojoin_value} are filled in by Terraform's templatefile();
# {{ GetPrivateIP }} is a go-sockaddr template that Consul itself evaluates
# at startup.
log_level      = "DEBUG"
data_dir       = "/opt/consul/data"
bind_addr      = "{{ GetPrivateIP }}"
advertise_addr = "{{ GetPrivateIP }}"
client_addr    = "0.0.0.0"

# single-node server cluster
server           = true
bootstrap_expect = 1

ui_config {
  enabled = true
}

acl {
  enabled = true
  tokens {
    # pre-generated management token so Terraform and the bootstrap script can
    # use the API with a known secret after `consul acl bootstrap`
    initial_management = "${management_token}"
    agent              = "${token}"
    default            = "${token}"
  }
}

# cloud auto-join on the EC2 tag Terraform sets on the Consul server instance
retry_join = ["provider=aws tag_key=ConsulAutoJoin tag_value=${autojoin_value}"]

tls {
  defaults {
    ca_file   = "/etc/consul.d/ca.pem"
    cert_file = "/etc/consul.d/cert.pem"
    key_file  = "/etc/consul.d/cert.key.pem"
  }
}

connect {
  enabled = true
}

service {
  name = "consul"
}

ports {
  # HTTPS API — the port the test runner and bootstrap script connect to
  https    = 8501
  grpc_tls = 8502
}

View File

@@ -1,6 +1,7 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
# TODO: add workload-identity configuration for servers
consul {
address = "127.0.0.1:8500"
token = "${token}"

View File

@@ -0,0 +1,10 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
# TODO: add workload-identity configuration for servers
consul {
address = "127.0.0.1:8500"
token = "${token}"
client_service_name = "${client_service_name}"
server_service_name = "${server_service_name}"
}

View File

@@ -1,130 +0,0 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
# Note: the test environment must have the following values set:
# export HCP_CLIENT_ID=
# export HCP_CLIENT_SECRET=
# export CONSUL_HTTP_TOKEN=
# export CONSUL_HTTP_ADDR=
data "hcp_consul_cluster" "e2e_shared_consul" {
cluster_id = var.hcp_consul_cluster_id
}
# policy and configuration for the Consul Agent
resource "consul_acl_policy" "consul_agent" {
name = "${local.random_name}_consul_agent_policy"
datacenters = [var.hcp_consul_cluster_id]
rules = data.local_file.consul_policy_for_consul_agent.content
}
data "local_file" "consul_policy_for_consul_agent" {
filename = "${path.root}/etc/acls/consul/consul-agent-policy.hcl"
}
resource "consul_acl_token" "consul_agent_token" {
description = "Consul agent token"
policies = [consul_acl_policy.consul_agent.name]
local = true
}
data "consul_acl_token_secret_id" "consul_agent_token" {
accessor_id = consul_acl_token.consul_agent_token.id
}
resource "local_sensitive_file" "consul_acl_file" {
content = templatefile("etc/consul.d/client_acl.json", {
token = data.consul_acl_token_secret_id.consul_agent_token.secret_id
})
filename = "uploads/shared/consul.d/client_acl.json"
file_permission = "0600"
}
resource "local_sensitive_file" "consul_ca_file" {
content = base64decode(data.hcp_consul_cluster.e2e_shared_consul.consul_ca_file)
filename = "uploads/shared/consul.d/ca.pem"
file_permission = "0600"
}
resource "local_sensitive_file" "consul_config_file" {
content = base64decode(data.hcp_consul_cluster.e2e_shared_consul.consul_config_file)
filename = "uploads/shared/consul.d/consul_client.json"
file_permission = "0644"
}
resource "local_sensitive_file" "consul_base_config_file" {
content = templatefile("${path.root}/etc/consul.d/clients.json", {})
filename = "uploads/shared/consul.d/consul_client_base.json"
file_permission = "0644"
}
resource "local_sensitive_file" "consul_systemd_unit_file" {
content = templatefile("${path.root}/etc/consul.d/consul.service", {})
filename = "uploads/shared/consul.d/consul.service"
file_permission = "0644"
}
# Nomad servers configuration for Consul
resource "consul_acl_policy" "nomad_servers" {
name = "${local.random_name}_nomad_server_policy"
datacenters = [var.hcp_consul_cluster_id]
rules = data.local_file.consul_policy_for_nomad_server.content
}
data "local_file" "consul_policy_for_nomad_server" {
filename = "${path.root}/etc/acls/consul/nomad-server-policy.hcl"
}
resource "consul_acl_token" "nomad_servers_token" {
description = "Nomad servers token"
policies = [consul_acl_policy.nomad_servers.name]
local = true
}
data "consul_acl_token_secret_id" "nomad_servers_token" {
accessor_id = consul_acl_token.nomad_servers_token.id
}
resource "local_sensitive_file" "nomad_server_config_for_consul" {
content = templatefile("etc/nomad.d/consul.hcl", {
token = data.consul_acl_token_secret_id.nomad_servers_token.secret_id
client_service_name = "client-${local.random_name}"
server_service_name = "server-${local.random_name}"
})
filename = "uploads/shared/nomad.d/server-consul.hcl"
file_permission = "0600"
}
# Nomad clients configuration for Consul
resource "consul_acl_policy" "nomad_clients" {
name = "${local.random_name}_nomad_client_policy"
datacenters = [var.hcp_consul_cluster_id]
rules = data.local_file.consul_policy_for_nomad_clients.content
}
data "local_file" "consul_policy_for_nomad_clients" {
filename = "${path.root}/etc/acls/consul/nomad-client-policy.hcl"
}
resource "consul_acl_token" "nomad_clients_token" {
description = "Nomad clients token"
policies = [consul_acl_policy.nomad_clients.name]
local = true
}
data "consul_acl_token_secret_id" "nomad_clients_token" {
accessor_id = consul_acl_token.nomad_clients_token.id
}
resource "local_sensitive_file" "nomad_client_config_for_consul" {
content = templatefile("etc/nomad.d/consul.hcl", {
token = data.consul_acl_token_secret_id.nomad_clients_token.secret_id
client_service_name = "client-${local.random_name}"
server_service_name = "server-${local.random_name}"
})
filename = "uploads/shared/nomad.d/client-consul.hcl"
file_permission = "0600"
}

View File

@@ -213,3 +213,48 @@ resource "aws_network_interface" "clients_secondary" {
device_index = 1
}
}
# Firewall for the single-node Consul server instance.
resource "aws_security_group" "consul_server" {
  name   = "${local.random_name}-consul-server"
  vpc_id = data.aws_vpc.default.id

  # SSH from test runner
  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = [local.ingress_cidr]
  }

  # Consul HTTP and RPC from test runner; 8501 is the HTTPS API port the
  # bootstrap script and test environment point at
  ingress {
    from_port   = 8500
    to_port     = 8503
    protocol    = "tcp"
    cidr_blocks = [local.ingress_cidr]
  }

  # Consul HTTP and RPC from Consul agents
  ingress {
    from_port   = 8500
    to_port     = 8503
    protocol    = "tcp"
    security_groups = [aws_security_group.clients.id, aws_security_group.servers.id]
  }

  # Consul Server internal from Consul agents (server RPC and serf gossip)
  ingress {
    from_port   = 8300
    to_port     = 8302
    protocol    = "tcp"
    security_groups = [aws_security_group.clients.id, aws_security_group.servers.id]
  }

  # allow all outbound
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

View File

@@ -7,19 +7,17 @@
# to get the management token into the provider's environment after we bootstrap.
# So we run a bootstrapping script and write our management token into a file
# that we read in for the output of $(terraform output environment) later.
locals {
nomad_env = "NOMAD_ADDR=https://${aws_instance.server.0.public_ip}:4646 NOMAD_CACERT=keys/tls_ca.crt NOMAD_CLIENT_CERT=keys/tls_api_client.crt NOMAD_CLIENT_KEY=keys/tls_api_client.key"
}
resource "null_resource" "bootstrap_nomad_acls" {
depends_on = [module.nomad_server]
triggers = {
command = aws_instance.server.0.public_ip != "" ? local.nomad_env : "echo 'Nomad server not ready yet, skipping bootstrap'"
}
depends_on = [module.nomad_server, null_resource.bootstrap_consul_acls]
provisioner "local-exec" {
command = "${local.nomad_env} ./scripts/bootstrap-nomad.sh"
command = "./scripts/bootstrap-nomad.sh"
environment = {
NOMAD_ADDR = "https://${aws_instance.server.0.public_ip}:4646"
NOMAD_CACERT = "keys/tls_ca.crt"
NOMAD_CLIENT_CERT = "keys/tls_api_client.crt"
NOMAD_CLIENT_KEY = "keys/tls_api_client.key"
}
}
}
@@ -41,6 +39,9 @@ export NOMAD_SKIP_VERIFY=true
export NOMAD_CLIENT_CERT=/etc/nomad.d/tls/agent.crt
export NOMAD_CLIENT_KEY=/etc/nomad.d/tls/agent.key
export NOMAD_TOKEN=${data.local_sensitive_file.nomad_token.content}
export CONSUL_HTTP_ADDR=https://localhost:8501
export CONSUL_HTTP_TOKEN="${random_uuid.consul_initial_management_token.result}"
export CONSUL_CACERT=/etc/consul.d/ca.pem
ENV
EXEC
}

View File

@@ -54,6 +54,9 @@ export NOMAD_CLIENT_CERT=${abspath(path.root)}/keys/tls_api_client.crt
export NOMAD_CLIENT_KEY=${abspath(path.root)}/keys/tls_api_client.key
export NOMAD_TOKEN=${data.local_sensitive_file.nomad_token.content}
export NOMAD_E2E=1
export CONSUL_HTTP_ADDR=https://${aws_instance.consul_server.public_ip}:8501
export CONSUL_HTTP_TOKEN=${local_sensitive_file.consul_initial_management_token.content}
export CONSUL_CACERT=${abspath(path.root)}/keys/tls_ca.crt
EOM
}

View File

@@ -52,10 +52,10 @@ resource "null_resource" "install_consul_configs_linux" {
inline = [
"mkdir -p /etc/consul.d",
"sudo rm -rf /etc/consul.d/*",
"sudo mv /tmp/consul_ca.pem /etc/consul.d/ca.pem",
"sudo mv /tmp/consul_client_acl.json /etc/consul.d/acl.json",
"sudo mv /tmp/consul_client.json /etc/consul.d/consul_client.json",
"sudo mv /tmp/consul_client_base.json /etc/consul.d/consul_client_base.json",
"sudo mv /tmp/consul_ca.crt /etc/consul.d/ca.pem",
"sudo mv /tmp/consul_cert.pem /etc/consul.d/cert.pem",
"sudo mv /tmp/consul_cert.key.pem /etc/consul.d/cert.key.pem",
"sudo mv /tmp/consul_client.hcl /etc/consul.d/consul.hcl",
"sudo mv /tmp/consul.service /etc/systemd/system/consul.service",
]
}

View File

@@ -56,23 +56,23 @@ resource "null_resource" "upload_consul_configs" {
}
provisioner "file" {
source = "uploads/shared/consul.d/ca.pem"
destination = "/tmp/consul_ca.pem"
source = "uploads/shared/consul.d/agent_cert.key.pem"
destination = "/tmp/consul_cert.key.pem"
}
provisioner "file" {
source = "uploads/shared/consul.d/consul_client.json"
destination = "/tmp/consul_client.json"
source = "uploads/shared/consul.d/agent_cert.pem"
destination = "/tmp/consul_cert.pem"
}
provisioner "file" {
source = "uploads/shared/consul.d/client_acl.json"
destination = "/tmp/consul_client_acl.json"
source = "keys/tls_ca.crt"
destination = "/tmp/consul_ca.crt"
}
provisioner "file" {
source = "uploads/shared/consul.d/consul_client_base.json"
destination = "/tmp/consul_client_base.json"
source = "uploads/shared/consul.d/clients.hcl"
destination = "/tmp/consul_client.hcl"
}
provisioner "file" {
source = "uploads/shared/consul.d/consul.service"
source = "etc/consul.d/consul.service"
destination = "/tmp/consul.service"
}
}
@@ -89,7 +89,7 @@ resource "null_resource" "upload_nomad_configs" {
timeout = "15m"
}
# created in hcp_consul.tf
# created in consul-clients.tf
provisioner "file" {
source = "uploads/shared/nomad.d/${var.role}-consul.hcl"
destination = "/tmp/consul.hcl"

View File

@@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

# Bootstrap Consul ACLs for the E2E cluster. Expects CONSUL_HTTP_ADDR,
# CONSUL_CACERT, and CONSUL_HTTP_TOKEN (the pre-generated initial management
# token) in the environment, plus CONSUL_AGENT_TOKEN and
# NOMAD_CLUSTER_CONSUL_TOKEN as the secrets for the tokens created below.

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

echo "waiting for Consul leader to be up..."
while true; do
    consul info && break
    echo "Consul server not ready, waiting 5s"
    sleep 5
done

# already bootstrapped is fine: the server config carries the same
# initial_management token on every run
consul acl bootstrap || echo "Consul ACLs already bootstrapped"

# BUGFIX: `grep -q` writes nothing to stdout, so the original
# `if [ $(... | grep -q ...) ]` collapsed to `[ ]` and was always false,
# silently skipping namespace creation on Enterprise servers. Use grep's exit
# status directly instead.
if consul info | grep -q "version_metadata = ent"; then
    echo "writing namespaces"
    consul namespace create -name "prod"
    consul namespace create -name "dev"
fi

echo "writing Nomad cluster policy and token"
consul acl policy create -name nomad-cluster -rules @"${DIR}/nomad-cluster-consul-policy.hcl"
consul acl token create -policy-name=nomad-cluster -secret "$NOMAD_CLUSTER_CONSUL_TOKEN"

echo "writing Consul cluster policy and token"
consul acl policy create -name consul-agents -rules @"${DIR}/consul-agents-policy.hcl"
consul acl token create -policy-name=consul-agents -secret "$CONSUL_AGENT_TOKEN"

View File

@@ -0,0 +1,12 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
# Consul agents only need to register themselves and read services
node "*" {
policy = "write"
}
service_prefix "" {
policy = "read"
}

View File

@@ -0,0 +1,34 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// The Nomad Client will be registering things into its buddy Consul Client.
// Note: because we also test the use of Consul namespaces, this token must be
// able to register services, read the keystore, and read node data for any
// namespace.
// The operator=write permission is required for creating config entries for
// connect ingress gateways. operator ACLs are not namespaced, though the
// config entries they can generate are.
operator = "write"
agent_prefix "" {
policy = "read"
}
namespace_prefix "" {
// The acl=write permission is required for generating Consul Service Identity
// tokens for consul connect services. Those services could be configured for
// any Consul namespace the job-submitter has access to.
acl = "write"
key_prefix "" {
policy = "read"
}
node_prefix "" {
policy = "read"
}
service_prefix "" {
policy = "write"
}
}

View File

@@ -8,5 +8,8 @@
nomad_local_binary = "../../pkg/linux_amd64/nomad"
nomad_local_binary_client_windows_2016_amd64 = ["../../pkg/windows_amd64/nomad.exe"]
# For testing enterprise, set via --var:
# The Consul server is Consul Enterprise, so provide a license via --var:
# consul_license = <content of Consul license>
# For testing Nomad enterprise, also set via --var:
# nomad_license = <content of Nomad license>

View File

@@ -22,12 +22,12 @@ resource "tls_self_signed_cert" "ca" {
allowed_uses = ["cert_signing"]
}
resource "local_file" "ca_key" {
resource "local_sensitive_file" "ca_key" {
filename = "keys/tls_ca.key"
content = tls_private_key.ca.private_key_pem
}
resource "local_file" "ca_cert" {
resource "local_sensitive_file" "ca_cert" {
filename = "keys/tls_ca.crt"
content = tls_self_signed_cert.ca.cert_pem
}

View File

@@ -53,7 +53,13 @@ variable "nomad_local_binary" {
variable "nomad_license" {
type = string
description = "If nomad_license is set, deploy a license to override the temporary license"
description = "If nomad_license is set, deploy a license"
default = ""
}
variable "consul_license" {
type = string
description = "If consul_license is set, deploy a license"
default = ""
}