e2e: bootstrap vault and provision Nomad with vault tokens (#9010)

Provisions Vault with the policies described in the Nomad–Vault integration
guide, and drops a configuration file containing the Vault token for the
Nomad servers. The Vault root token is exposed to the E2E runner so that
tests can write additional policies to Vault.
This commit is contained in:
Tim Gross
2020-10-05 09:28:37 -04:00
committed by GitHub
parent ea397cb042
commit 5f87acf6cf
19 changed files with 230 additions and 49 deletions

View File

@@ -2,11 +2,3 @@ server {
enabled = true
bootstrap_expect = 3
}
vault {
enabled = false
address = "http://active.vault.service.consul:8200"
task_token_ttl = "1h"
create_from_role = "nomad-cluster"
token = ""
}

View File

@@ -1,12 +1,15 @@
backend "consul" {
path = "vault/"
address = "{{ GetPrivateIP }}:8500"
cluster_addr = "https://{{ GetPrivateIP }}:8201"
redirect_addr = "http://{{ GetPrivateIP }}:8200"
listener "tcp" {
address = "0.0.0.0:8200"
tls_disable = 1
}
listener "tcp" {
address = "{{ GetPrivateIP }}:8200"
cluster_address = "{{ GetPrivateIP }}:8201"
tls_disable = 1
# this autounseal key is created by Terraform in the E2E infrastructure repo
# and should be used only for these tests
seal "awskms" {
region = "us-east-1"
kms_key_id = "74b7e226-c745-4ddd-9b7f-2371024ee37d"
}
# Vault 1.5.4 doesn't have autodiscovery for retry_join on its
# integrated storage yet so we'll just use consul for storage
storage "consul" {}

View File

@@ -2,11 +2,3 @@ server {
enabled = true
bootstrap_expect = 3
}
vault {
enabled = false
address = "http://active.vault.service.consul:8200"
task_token_ttl = "1h"
create_from_role = "nomad-cluster"
token = ""
}

View File

@@ -1,12 +1,15 @@
backend "consul" {
path = "vault/"
address = "{{ GetPrivateIP }}:8500"
cluster_addr = "https://{{ GetPrivateIP }}:8201"
redirect_addr = "http://{{ GetPrivateIP }}:8200"
listener "tcp" {
address = "0.0.0.0:8200"
tls_disable = 1
}
listener "tcp" {
address = "{{ GetPrivateIP }}:8200"
cluster_address = "{{ GetPrivateIP }}:8201"
tls_disable = 1
# this autounseal key is created by Terraform in the E2E infrastructure repo
# and should be used only for these tests
seal "awskms" {
region = "us-east-1"
kms_key_id = "74b7e226-c745-4ddd-9b7f-2371024ee37d"
}
# Vault 1.5.4 doesn't have autodiscovery for retry_join on its
# integrated storage yet so we'll just use consul for storage
storage "consul" {}

View File

@@ -42,6 +42,14 @@ resource "aws_security_group" "primary" {
cidr_blocks = ["0.0.0.0/0"]
}
# Vault
ingress {
from_port = 8200
to_port = 8200
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# HDFS NameNode UI
ingress {
from_port = 50070

View File

@@ -8,19 +8,19 @@
resource "null_resource" "bootstrap_nomad_acls" {
depends_on = [module.nomad_server]
triggers = {
script = data.template_file.bootstrap_script.rendered
script = data.template_file.bootstrap_nomad_script.rendered
}
provisioner "local-exec" {
command = data.template_file.bootstrap_script.rendered
command = data.template_file.bootstrap_nomad_script.rendered
}
}
# write the bootstrap token to the keys/ directory (where the ssh key is)
# so that we can read it into the data.local_file later. If not set,
# ensure that it's empty.
data "template_file" "bootstrap_script" {
template = var.nomad_acls ? "NOMAD_ADDR=http://${aws_instance.server.0.public_ip}:4646 ./acls/bootstrap-nomad.sh" : "mkdir -p ${path.root}/keys; echo > ${path.root}/keys/nomad_root_token"
data "template_file" "bootstrap_nomad_script" {
template = var.nomad_acls ? "NOMAD_ADDR=http://${aws_instance.server.0.public_ip}:4646 ./scripts/bootstrap-nomad.sh" : "mkdir -p ${path.root}/keys; echo > ${path.root}/keys/nomad_root_token"
}
data "local_file" "nomad_token" {

View File

@@ -38,7 +38,10 @@ output "environment" {
value = <<EOM
export NOMAD_ADDR=http://${aws_instance.server[0].public_ip}:4646
export CONSUL_HTTP_ADDR=http://${aws_instance.server[0].public_ip}:8500
export VAULT_ADDR=http://${aws_instance.server[0].public_ip}:8200
export NOMAD_E2E=1
export NOMAD_TOKEN=${data.local_file.nomad_token.content}
export VAULT_TOKEN=${data.local_file.vault_token.content}
EOM
}

View File

@@ -182,7 +182,9 @@ if [ -n "$NOMAD_PROFILE" ]; then
fi
if [ $START == "1" ]; then
# sudo systemctl restart vault
if [ "$NOMAD_ROLE" == "server" ]; then
sudo systemctl restart vault
fi
sudo systemctl restart consul
sudo systemctl restart nomad
fi

View File

@@ -6,8 +6,8 @@ set -e
# Will be overwritten at test time with the version specified
NOMADVERSION=0.9.1
CONSULVERSION=1.7.3
VAULTVERSION=1.1.1
CONSULVERSION=1.8.3
VAULTVERSION=1.5.4
NOMAD_PLUGIN_DIR=/opt/nomad/plugins/
@@ -65,6 +65,9 @@ mkdir_for_root /etc/vault.d
mkdir_for_root /opt/vault
sudo mv /tmp/linux/vault.service /etc/systemd/system/vault.service
sudo setcap cap_ipc_lock=+ep /usr/local/bin/vault
sudo useradd --system --home /etc/vault.d --shell /bin/false vault
echo "Configure Nomad"
mkdir_for_root /etc/nomad.d
mkdir_for_root /opt/nomad

View File

@@ -1,16 +1,33 @@
[Unit]
Description=Vault Agent
Description="HashiCorp Vault - A tool for managing secrets"
Documentation=https://www.vaultproject.io/docs/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=/etc/vault.d/vault.hcl
StartLimitIntervalSec=60
StartLimitBurst=3
[Service]
User=vault
Group=vault
ProtectSystem=full
ProtectHome=read-only
PrivateTmp=yes
PrivateDevices=yes
SecureBits=keep-caps
AmbientCapabilities=CAP_IPC_LOCK
Capabilities=CAP_IPC_LOCK+ep
CapabilityBoundingSet=CAP_SYSLOG CAP_IPC_LOCK
NoNewPrivileges=yes
ExecStart=/usr/local/bin/vault server -config=/etc/vault.d/vault.hcl
ExecReload=/bin/kill --signal HUP $MAINPID
KillMode=process
KillSignal=SIGINT
Restart=on-failure
Environment=GOMAXPROCS=nproc
ExecStart=/usr/local/bin/vault server -config="/etc/vault.d/vault.hcl"
ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGTERM
User=root
Group=root
RestartSec=5
TimeoutStopSec=30
LimitNOFILE=65536
LimitMEMLOCK=infinity
[Install]
WantedBy=multi-user.target

View File

@@ -22,4 +22,4 @@ echo $NOMAD_TOKEN > "${DIR}/../keys/nomad_root_token"
nomad acl policy apply \
-description "Anonymous policy (full-access)" \
anonymous \
"${DIR}/anonymous.policy.hcl"
"${DIR}/anonymous.nomad_policy.hcl"

View File

@@ -0,0 +1,37 @@
#!/bin/bash
# Bootstrap Vault for the E2E cluster: initialize Vault (auto-unsealed via
# AWS KMS), capture the root token, install the nomad-server policy and the
# nomad-cluster token role, and emit a Nomad vault{} configuration stanza
# containing a periodic orphan token for the Nomad servers.
#
# Expects VAULT_ADDR to be set by the caller (see vault.tf) and the policy
# and role files to live alongside this script.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# unseal vault and get a root operator token; the vault is configured to
# autounseal with AWS KMS. Retry until the Vault servers are reachable and
# initialization succeeds.
while :
do
    ROOT_TOKEN=$(vault operator init -recovery-shares=1 -recovery-threshold=1 | awk '/Initial Root Token/{print $4}')
    if [ -n "$ROOT_TOKEN" ]; then break; fi
    sleep 5
done
set -e

export VAULT_TOKEN="$ROOT_TOKEN"

# write the root token to keys/ so Terraform's data.local_file can read it.
# NOTE: was 'mkdir -p ../keys', which is cwd-relative and inconsistent with
# the ${DIR}-anchored path used below; anchor it to the script's directory.
mkdir -p "${DIR}/../keys"
echo "$VAULT_TOKEN" > "${DIR}/../keys/vault_root_token"

# write policies for Nomad to Vault, and then configure Nomad to use the
# token from those policies
vault policy write nomad-server "${DIR}/vault-nomad-server-policy.hcl"
vault write /auth/token/roles/nomad-cluster "@${DIR}/vault-nomad-cluster-role.json"

# periodic orphan token for the Nomad servers, created from the
# nomad-server policy per the Nomad Vault integration guide
NOMAD_VAULT_TOKEN=$(vault token create -policy nomad-server -period 72h -orphan | awk '/token /{print $2}')

cat <<EOF > "${DIR}/../keys/nomad_vault.hcl"
vault {
  enabled          = true
  address          = "http://active.vault.service.consul:8200"
  task_token_ttl   = "1h"
  create_from_role = "nomad-cluster"
  token            = "$NOMAD_VAULT_TOKEN"
}
EOF

View File

@@ -0,0 +1,8 @@
{
"disallowed_policies": "nomad-server",
"token_explicit_max_ttl": 0,
"name": "nomad-cluster",
"orphan": true,
"token_period": 259200,
"renewable": true
}

View File

@@ -0,0 +1,41 @@
# Vault policy granted to the Nomad servers' token. This is the standard
# nomad-server policy from the Nomad Vault integration guide: it lets Nomad
# mint task tokens from the "nomad-cluster" role and manage their lifecycle.

# Allow creating tokens under "nomad-cluster" role. The role name should be
# updated if "nomad-cluster" is not used.
path "auth/token/create/nomad-cluster" {
  capabilities = ["update"]
}

# Allow looking up "nomad-cluster" role. The role name should be updated if
# "nomad-cluster" is not used.
path "auth/token/roles/nomad-cluster" {
  capabilities = ["read"]
}

# Allow looking up the token passed to Nomad to validate the token has the
# proper capabilities. This is provided by the "default" policy.
path "auth/token/lookup-self" {
  capabilities = ["read"]
}

# Allow looking up incoming tokens to validate they have permissions to access
# the tokens they are requesting. This is only required if
# `allow_unauthenticated` is set to false.
path "auth/token/lookup" {
  capabilities = ["update"]
}

# Allow revoking tokens that should no longer exist. This allows revoking
# tokens for dead tasks.
path "auth/token/revoke-accessor" {
  capabilities = ["update"]
}

# Allow checking the capabilities of our own token. This is used to validate the
# token upon startup.
path "sys/capabilities-self" {
  capabilities = ["update"]
}

# Allow our own token to be renewed.
path "auth/token/renew-self" {
  capabilities = ["update"]
}

View File

@@ -6,3 +6,4 @@ windows_client_count = "1"
profile = "full-cluster"
nomad_enterprise = true
nomad_acls = true
vault = true

View File

@@ -6,6 +6,7 @@ windows_client_count = "0"
profile = "dev-cluster"
nomad_acls = false
nomad_enterprise = false
vault = true
# Example overrides:
# nomad_local_binary = "../../pkg/linux_amd/nomad"

View File

@@ -85,6 +85,12 @@ variable "nomad_acls" {
default = false
}
variable "vault" {
type = bool
description = "Bootstrap Vault"
default = false
}
# ----------------------------------------
# If you want to deploy multiple versions you can use these variables to
# provide a list of builds to override the values of nomad_sha, nomad_version,

64
e2e/terraform/vault.tf Normal file
View File

@@ -0,0 +1,64 @@
# Runs bootstrap-vault.sh against the first server once the cluster is up,
# re-triggering whenever the rendered bootstrap command changes.
resource "null_resource" "bootstrap_vault" {
  depends_on = [
    aws_instance.server,
    module.nomad_server
  ]
  triggers = {
    script = data.template_file.bootstrap_vault_script.rendered
  }
  provisioner "local-exec" {
    command = data.template_file.bootstrap_vault_script.rendered
  }
}

# write the bootstrap token to the keys/ directory (where the ssh key is)
# so that we can read it into the data.local_file later. If not set,
# ensure that it's empty.
data "template_file" "bootstrap_vault_script" {
  template = var.vault ? "VAULT_ADDR=http://${aws_instance.server.0.public_ip}:8200 ./scripts/bootstrap-vault.sh" : "mkdir -p ${path.root}/keys; echo > ${path.root}/keys/vault_root_token"
}

# Vault root token written by bootstrap-vault.sh (empty when var.vault is
# false); surfaced to the E2E runner via the "environment" output.
data "local_file" "vault_token" {
  depends_on = [null_resource.bootstrap_vault]
  filename   = "${path.root}/keys/vault_root_token"
}

# Nomad vault{} stanza (including the server token) emitted by
# bootstrap-vault.sh; pushed to each server below.
data "local_file" "nomad_vault_config" {
  depends_on = [null_resource.bootstrap_vault]
  filename   = "${path.root}/keys/nomad_vault.hcl"
}

# Installs the generated vault{} stanza on every Nomad server and restarts
# Nomad; re-runs when the stanza's content changes.
resource "null_resource" "nomad_vault_config" {
  depends_on = [
    aws_instance.server,
    null_resource.bootstrap_vault
  ]
  triggers = {
    data = data.local_file.nomad_vault_config.content
  }
  count = var.server_count

  # copy to the ubuntu user's home dir first; the remote-exec step moves it
  # into /etc/nomad.d with sudo since the file provisioner can't write there
  provisioner "file" {
    source      = "${path.root}/keys/nomad_vault.hcl"
    destination = "./nomad_vault.hcl"
  }
  provisioner "remote-exec" {
    inline = [
      "sudo mv ./nomad_vault.hcl /etc/nomad.d/nomad_vault.hcl",
      "sudo systemctl restart nomad"
    ]
  }
  connection {
    type        = "ssh"
    user        = "ubuntu"
    host        = aws_instance.server[count.index].public_ip
    port        = 22
    private_key = file("${path.root}/keys/${local.random_name}.pem")
  }
}