Mirror of https://github.com/kemko/nomad.git (synced 2026-01-01 16:05:42 +03:00)

E2E with HCP Consul/Vault (#12267)
Use HCP Consul and HCP Vault for the Consul and Vault clusters used in E2E testing. This has the following benefits:

* Without the need to support mTLS bootstrapping for Consul and Vault, we can simplify the mTLS configuration by leaning on Terraform instead of janky bash shell scripting.
* Vault bootstrapping is no longer required, so we can eliminate even more janky shell scripting.
* Our E2E exercises HCP, which is important to us as an organization.
* With the reduction in configurability, we can simplify the Terraform configuration and drop the complicated `provision.sh`/`provision.ps1` scripts we were using previously. We can template Nomad configuration files and upload them with the `file` provisioner.
* Packer builds for Linux and Windows become much simpler.

tl;dr way less janky shell scripting!
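The template-and-upload approach the message describes looks roughly like the following minimal sketch. This is illustrative only, not the commit's exact code: the resource names, the `data_dir` value, the `ubuntu` login user, and the `aws_instance.client` reference are assumptions.

```hcl
# Render a Nomad config template locally, then push it to an instance
# with Terraform's built-in `file` provisioner (no shell scripting).
resource "local_file" "nomad_base_config" {
  content = templatefile("${path.root}/etc/nomad.d/base.hcl", {
    data_dir = "/opt/nomad/data" # template variable; value assumed
  })
  filename        = "${path.root}/uploads/nomad.d/base.hcl"
  file_permission = "0600"
}

resource "null_resource" "upload_nomad_config" {
  connection {
    type = "ssh"
    user = "ubuntu"                      # assumed login user
    host = aws_instance.client.public_ip # hypothetical instance
  }

  # Upload the rendered file to the agent's config directory.
  provisioner "file" {
    source      = local_file.nomad_base_config.filename
    destination = "/etc/nomad.d/base.hcl"
  }
}
```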
@@ -7,8 +7,8 @@ import (
    "time"

    consulapi "github.com/hashicorp/consul/api"
    uuidparse "github.com/hashicorp/go-uuid"
    nomadapi "github.com/hashicorp/nomad/api"
    "github.com/hashicorp/nomad/e2e/consulacls"
    "github.com/hashicorp/nomad/e2e/e2eutil"
    "github.com/hashicorp/nomad/e2e/framework"
    "github.com/hashicorp/nomad/helper/uuid"

@@ -19,12 +19,8 @@ import (
type ConnectACLsE2ETest struct {
    framework.TC

    // manageConsulACLs is used to 'enable' and 'disable' Consul ACLs in the
    // Consul Cluster that has been set up for e2e testing.
    manageConsulACLs consulacls.Manager

    // consulManagementToken is set to the generated Consul ACL token after using
    // the consul-acls-manage.sh script to enable ACLs.
    // used to store the root token so we can reset the client back to
    // it as needed
    consulManagementToken string

    // things to clean up after each test case

@@ -38,47 +34,12 @@ func (tc *ConnectACLsE2ETest) BeforeAll(f *framework.F) {
    e2eutil.WaitForLeader(f.T(), tc.Nomad())
    e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 2)

    // Now enable Consul ACLs, the bootstrapping process for which will be
    // managed automatically if needed.
    var err error
    tc.manageConsulACLs, err = consulacls.New(consulacls.DefaultTFStateFile)
    require.NoError(f.T(), err)
    tc.enableConsulACLs(f)

    // Validate the consul master token exists, otherwise tests are just
    // Validate the consul root token exists, otherwise tests are just
    // going to be a train wreck.
    tokenLength := len(tc.consulManagementToken)
    require.Equal(f.T(), 36, tokenLength, "consul master token wrong length")
    tc.consulManagementToken = os.Getenv(envConsulToken)

    // Validate the CONSUL_HTTP_TOKEN is NOT set, because that will cause
    // the agent checks to fail (which do not allow having a token set (!)).
    consulTokenEnv := os.Getenv(envConsulToken)
    require.Empty(f.T(), consulTokenEnv)

    // Wait for Nomad to be ready _again_, since everything was restarted during
    // the bootstrap process.
    e2eutil.WaitForLeader(f.T(), tc.Nomad())
    e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 2)
}

// enableConsulACLs effectively executes `consul-acls-manage.sh enable`, which
// will activate Consul ACLs, going through the bootstrap process if necessary.
func (tc *ConnectACLsE2ETest) enableConsulACLs(f *framework.F) {
    tc.consulManagementToken = tc.manageConsulACLs.Enable(f.T())
}

// AfterAll runs after all tests are complete.
//
// We disable ConsulACLs in here to isolate the use of Consul ACLs only to
// test suites that explicitly want to test with them enabled.
func (tc *ConnectACLsE2ETest) AfterAll(f *framework.F) {
    tc.disableConsulACLs(f)
}

// disableConsulACLs effectively executes `consul-acls-manage.sh disable`, which
// will de-activate Consul ACLs.
func (tc *ConnectACLsE2ETest) disableConsulACLs(f *framework.F) {
    tc.manageConsulACLs.Disable(f.T())
    _, err := uuidparse.ParseUUID(tc.consulManagementToken)
    f.NoError(err, "CONSUL_HTTP_TOKEN not set")
}

// AfterEach does cleanup of Consul ACL objects that were created during each

@@ -175,6 +136,7 @@ func (tc *ConnectACLsE2ETest) TestConnectACLsRegisterMasterToken(f *framework.F)
    // One should never do this in practice, but, it should work.
    // https://www.consul.io/docs/acl/acl-system.html#builtin-tokens
    job.ConsulToken = &tc.consulManagementToken
    job.ID = &jobID

    // Avoid using Register here, because that would actually create and run the
    // Job which runs the task, creates the SI token, which all needs to be

@@ -188,15 +150,20 @@ func (tc *ConnectACLsE2ETest) TestConnectACLsRegisterMasterToken(f *framework.F)
func (tc *ConnectACLsE2ETest) TestConnectACLsRegisterMissingOperatorToken(f *framework.F) {
    t := f.T()

    t.Skip("we don't have consul.allow_unauthenticated=false set because it would require updating every E2E test to pass a Consul token")

    t.Log("test register Connect job w/ ACLs enabled w/o operator token")

    jobID := "connect" + uuid.Short()
    tc.jobIDs = append(tc.jobIDs, jobID) // need to clean up if the test fails

    job, err := jobspec.ParseFile(demoConnectJob)
    f.NoError(err)

    jobAPI := tc.Nomad().Jobs()

    // Explicitly show the ConsulToken is not set
    job.ConsulToken = nil
    job.ID = &jobID

    _, _, err = jobAPI.Register(job, nil)
    f.Error(err)

@@ -207,6 +174,8 @@ func (tc *ConnectACLsE2ETest) TestConnectACLsRegisterFakeOperatorToken(f *fra
func (tc *ConnectACLsE2ETest) TestConnectACLsRegisterFakeOperatorToken(f *framework.F) {
    t := f.T()

    t.Skip("we don't have consul.allow_unauthenticated=false set because it would require updating every E2E test to pass a Consul token")

    t.Log("test register Connect job w/ ACLs enabled w/ operator token")

    policyID := tc.createConsulPolicy(consulPolicy{

@@ -217,12 +186,17 @@ func (tc *ConnectACLsE2ETest) TestConnectACLsRegisterFakeOperatorToken(f *framew

    // generate a fake consul token
    fakeToken := uuid.Generate()

    jobID := "connect" + uuid.Short()
    tc.jobIDs = append(tc.jobIDs, jobID) // need to clean up if the test fails

    job := tc.parseJobSpecFile(t, demoConnectJob)

    jobAPI := tc.Nomad().Jobs()

    // deliberately set the fake Consul token
    job.ConsulToken = &fakeToken
    job.ID = &jobID

    // should fail, because the token is fake
    _, _, err := jobAPI.Register(job, nil)
@@ -49,22 +49,15 @@ func init() {
        },
    })

    // Connect tests with Consul ACLs enabled. These are now gated behind the
    // NOMAD_TEST_CONSUL_ACLS environment variable, because they cause lots of
    // problems for e2e test flakiness (due to restarting consul, nomad, etc.).
    //
    // Run these tests locally when working on Connect.
    if os.Getenv("NOMAD_TEST_CONSUL_ACLS") == "1" {
        framework.AddSuites(&framework.TestSuite{
            Component:   "ConnectACLs",
            CanRunLocal: false,
            Consul:      true,
            Parallel:    false,
            Cases: []framework.TestCase{
                new(ConnectACLsE2ETest),
            },
        })
    }
    framework.AddSuites(&framework.TestSuite{
        Component:   "ConnectACLs",
        CanRunLocal: false,
        Consul:      true,
        Parallel:    false,
        Cases: []framework.TestCase{
            new(ConnectACLsE2ETest),
        },
    })
}

func (tc *ConnectE2ETest) BeforeAll(f *framework.F) {

@@ -42,7 +42,6 @@ func init() {
            new(ScriptChecksE2ETest),
            new(CheckRestartE2ETest),
            new(OnUpdateChecksTest),
            new(ConsulNamespacesE2ETest),
        },
    })
}
@@ -2,6 +2,7 @@ package consul

import (
    "fmt"
    "os"
    "sort"

    capi "github.com/hashicorp/consul/api"

@@ -37,20 +38,40 @@ var (
    allConsulNamespaces = append(consulNamespaces, "default")
)

func init() {
    framework.AddSuites(&framework.TestSuite{
        Component:   "ConsulNamespaces",
        CanRunLocal: true,
        Consul:      true,
        Cases: []framework.TestCase{
            new(ConsulNamespacesE2ETest),
        },
    })
}

type ConsulNamespacesE2ETest struct {
    framework.TC

    jobIDs []string

    // cToken contains the Consul global-management token during ACL enabled
    // tests (i.e. ConsulNamespacesE2ETestACLs which embeds ConsulNamespacesE2ETest).
    // cToken contains the Consul global-management token
    cToken string

    // created policy and token IDs should be set here so they can be cleaned
    // up after each test case, organized by namespace
    policyIDs map[string][]string
    tokenIDs  map[string][]string
}

func (tc *ConsulNamespacesE2ETest) BeforeAll(f *framework.F) {
    tc.policyIDs = make(map[string][]string)
    tc.tokenIDs = make(map[string][]string)

    e2eutil.WaitForLeader(f.T(), tc.Nomad())
    e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 1)

    tc.cToken = os.Getenv("CONSUL_HTTP_TOKEN")

    // create a set of consul namespaces in which to register services
    e2eutil.CreateConsulNamespaces(f.T(), tc.Consul(), consulNamespaces)

@@ -61,9 +82,6 @@ func (tc *ConsulNamespacesE2ETest) BeforeAll(f *framework.F) {
        value := fmt.Sprintf("ns_%s", namespace)
        e2eutil.PutConsulKey(f.T(), tc.Consul(), namespace, "ns-kv-example", value)
    }

    // make the unused variable linter happy in oss
    f.T().Log("Consul global-management token:", tc.cToken)
}

func (tc *ConsulNamespacesE2ETest) AfterAll(f *framework.F) {
@@ -9,6 +9,7 @@
package consul

import (
    "os"
    "sort"

    capi "github.com/hashicorp/consul/api"

@@ -17,6 +18,26 @@ import (
    "github.com/stretchr/testify/require"
)

func (tc *ConsulNamespacesE2ETest) AfterEach(f *framework.F) {
    if os.Getenv("NOMAD_TEST_SKIPCLEANUP") == "1" {
        return
    }

    // cleanup jobs
    for _, id := range tc.jobIDs {
        _, _, err := tc.Nomad().Jobs().Deregister(id, true, nil)
        f.NoError(err)
    }

    // do garbage collection
    err := tc.Nomad().System().GarbageCollect()
    f.NoError(err)

    // reset accumulators
    tc.tokenIDs = make(map[string][]string)
    tc.policyIDs = make(map[string][]string)
}

func (tc *ConsulNamespacesE2ETest) TestConsulRegisterGroupServices(f *framework.F) {
    nomadClient := tc.Nomad()
    jobID := "cns-group-services"

@@ -44,7 +44,6 @@ scrape_configs:

    consul_sd_configs:
    - server: '{{ env "NOMAD_IP_prometheus_ui" }}:8500'
      services: ['nomad-client', 'nomad']

    relabel_configs:
    - source_labels: ['__meta_consul_tags']
@@ -51,6 +51,8 @@ func (tc *NodeDrainE2ETest) AfterEach(f *framework.F) {
    for _, id := range tc.nodeIDs {
        _, err := e2e.Command("nomad", "node", "drain", "-disable", "-yes", id)
        f.Assert().NoError(err)
        _, err = e2e.Command("nomad", "node", "eligibility", "-enable", id)
        f.Assert().NoError(err)
    }
    tc.nodeIDs = []string{}

@@ -140,7 +142,7 @@ func (tc *NodeDrainE2ETest) TestNodeDrainEphemeralMigrate(f *framework.F) {
    // match the old allocation, not the running one
    var got string
    var fsErr error
    testutil.WaitForResultRetries(500, func() (bool, error) {
    testutil.WaitForResultRetries(10, func() (bool, error) {
        time.Sleep(time.Millisecond * 100)
        for _, alloc := range allocs {
            if alloc["Status"] == "running" && alloc["Node ID"] != nodeID && alloc["ID"] != oldAllocID {

@@ -149,18 +151,15 @@ func (tc *NodeDrainE2ETest) TestNodeDrainEphemeralMigrate(f *framework.F) {
                if err != nil {
                    return false, err
                }
                if strings.TrimSpace(got) == oldAllocID {
                    return true, nil
                } else {
                    return false, fmt.Errorf("expected %q, got %q", oldAllocID, got)
                }
                return true, nil
            }
        }
        return false, fmt.Errorf("did not find a migrated alloc")
        return false, fmt.Errorf("missing expected allocation")
    }, func(e error) {
        fsErr = e
    })
    f.NoError(fsErr, "node drained but migration failed")
    f.NoError(fsErr, "could not get allocation data")
    f.Equal(oldAllocID, strings.TrimSpace(got), "node drained but migration failed")
}

// TestNodeDrainIgnoreSystem tests that system jobs are left behind when the
e2e/terraform/.terraform.lock.hcl | 54 (generated)
@@ -19,6 +19,24 @@ provider "registry.terraform.io/hashicorp/aws" {
  ]
}

provider "registry.terraform.io/hashicorp/consul" {
  version = "2.14.0"
  hashes = [
    "h1:fbE0ZM8D8Q9m+BsHiYMAO+DQLwXOJoAlg8XXUq5FIrY=",
    "zh:06dcca1f76b839af8f86c7b6f65b944003a7a35b30b865b3884f48e2c42f9aee",
    "zh:16111df6a485e21cee6ca33cb863434baa1ca360c819c8e2af85e465c1361d2b",
    "zh:26b59c82ac2861b2651c1fa31955c3e7790e3c2d5d097f22aa34d3c294da63cf",
    "zh:70fd6853099126a602d5ac26caa80214a4a8a38f0cad8a5e3b7bef49923419d3",
    "zh:7d4f0061d6fb86e0a5639ed02381063b868245082ec4e3a461bcda964ed00fcc",
    "zh:a48cbf57d6511922362d5b0f76f449fba7a550c9d0702635fabb43b4f0a09fc0",
    "zh:bb54994a53dd8e1ff84ca50742ce893863dc166fd41b91d951f4cb89fe6a6bc0",
    "zh:bc61b19ee3c8d55a9915a3ad84203c87bfd0d57eca8eec788524b14e8b67f090",
    "zh:cbe3238e756ada23c1e7c97c42a5c72bf810dc5bd1265c9f074c3e739d1090b0",
    "zh:e30198054239eab46493e59956b9cd8c376c3bbd9515ac102a96d1fbd32e423f",
    "zh:e74365dba529a0676107e413986d7be81c2125c197754ce69e3e89d8daa53153",
  ]
}

provider "registry.terraform.io/hashicorp/external" {
  version = "2.1.0"
  hashes = [

@@ -37,6 +55,24 @@ provider "registry.terraform.io/hashicorp/external" {
  ]
}

provider "registry.terraform.io/hashicorp/hcp" {
  version = "0.23.1"
  hashes = [
    "h1:OeCY9pcVhlaVbONZ8fQ7Dgm/hFmkhmXXWJaAnLitkqM=",
    "zh:02c661913643a56ba640432a0bcdf2824218a3598a243da4fd6079238164e7f6",
    "zh:2359656d097fb1164bfe961314dafdac80f272c9bc0e359a6e43f5467a231e8f",
    "zh:2463ac7e40702cbb4ebd4a397964b87de1b65dcb6982eab32f2bd40c9a5b1294",
    "zh:420ef5061b936741a469b4e02dfe9ee047d928c294647e8c5f93e4a8890997a3",
    "zh:5eba99a60a3366cd97b70a4ee26cb4489ca320699010bd03ca726772a10089c1",
    "zh:82419028e8691acbb2c3f7e7d8c2c931ee03d6b3df6b97f5b965365f0a90392f",
    "zh:93b7eecff21055c8b46d5a69ba982abc76479f73a78f67fc86fc86ba56f630cd",
    "zh:c151238e96c30126529ccc42bf06d84f73fcd87ee40dbb493be8d85ef0efd453",
    "zh:d476ebe1a628abd08d11354a13e5b8aa708d820dcad78587b8440d12f0e219ef",
    "zh:e48130a57cf930755983b861768b8e88767e11df33640386d03496d551fb64ce",
    "zh:ed9cf5173ea09010ef5ecae452dd3da52054a659e23af8d8e1ed6a45270cd531",
  ]
}

provider "registry.terraform.io/hashicorp/http" {
  version = "2.1.0"
  hashes = [

@@ -143,3 +179,21 @@ provider "registry.terraform.io/hashicorp/tls" {
    "zh:fc1e12b713837b85daf6c3bb703d7795eaf1c5177aebae1afcf811dd7009f4b0",
  ]
}

provider "registry.terraform.io/hashicorp/vault" {
  version = "3.3.1"
  hashes = [
    "h1:4u5bqCcflSWqJgr3+/02HtP+ZuF4tUaEIUMTW0nv98k=",
    "zh:3e1866037f43c1083ff825dce2a9e3853c757bb0121c5ae528ee3cf3f99b4113",
    "zh:49636cc5c4939134e098c4ec0163c41fae103f24d7e1e8fc0432f8ad93d596a0",
    "zh:5258a7001719c4aeb84f4c4da7115b795da4794754938a3c4176a4b578fe93a1",
    "zh:7461738691e2e8ea91aba73d4351cfbc30fcaedcf0e332c9d35ef215f93aa282",
    "zh:815529478e33a6727273b08340a4c62c9aeb3da02abf8f091bb4f545c8451fce",
    "zh:8e6fede9f5e25b507faf6cacd61b997035b8b62859245861149ddb2990ada8eb",
    "zh:9acc2387084b9c411e264c4351633bc82f9c4e420f8e6bbad9f87b145351f929",
    "zh:b9e4af3b06386ceed720f0163a1496088c154aa1430ae072c525ffefa4b37891",
    "zh:c7d5dfb8f8536694db6740e2a4afd2d681b60b396ded469282524c62ce154861",
    "zh:d0850be710c6fd682634a2f823beed0164231cc873b1dc09038aa477c926f57c",
    "zh:e90c2cba9d89db5eab295b2f046f24a53f23002bcfe008633d398fb3fa16d941",
  ]
}
@@ -1,32 +1,52 @@
NOMAD_SHA ?= $(shell git rev-parse HEAD)
PKG_PATH = $(shell pwd)/../../pkg/linux_amd64/nomad
LICENSE_PATH ?=

# The version of nomad that gets deployed depends on an order of precedence
# linked below
# https://github.com/hashicorp/nomad/blob/main/e2e/terraform/README.md#nomad-version
dev-cluster:
# deploy for quick local development testing

plan:
	terraform plan \
		-var="nomad_local_binary=$(PKG_PATH)" \
		-var="volumes=false" \
		-var="client_count_ubuntu_bionic_amd64=2" \
		-var="client_count_windows_2016_amd64=0"

apply:
	terraform apply -auto-approve \
		-var="nomad_sha=$(NOMAD_SHA)"
	terraform output environment
		-var="nomad_local_binary=$(PKG_PATH)" \
		-var="volumes=false" \
		-var="client_count_ubuntu_bionic_amd64=2" \
		-var="client_count_windows_2016_amd64=0"

dev-cluster-from-local:
	terraform apply -auto-approve \
		-var="nomad_local_binary=$(PKG_PATH)"
	terraform output environment
clean: destroy tidy

clean:
destroy:
	terraform destroy -auto-approve \
		-var="nomad_local_binary=$(PKG_PATH)" \
		-var="client_count_ubuntu_bionic_amd64=2" \
		-var="client_count_windows_2016_amd64=0"

# deploy what's in E2E nightly

plan_full:
	terraform plan

apply_full:
	@terraform apply -auto-approve \
		-var="nomad_license=$(shell cat $(LICENSE_PATH))"

clean_full: destroy_full tidy

destroy_full:
	terraform destroy -auto-approve

full-cluster:
	terraform apply -auto-approve \
		-var-file=terraform.full.tfvars \
		-var="nomad_sha=$(NOMAD_SHA)"
# util

plan-dev-cluster:
	terraform plan \
		-var="nomad_sha=$(NOMAD_SHA)"

plan-full-cluster:
	terraform plan \
		-var-file=terraform.full.tfvars \
		-var="nomad_sha=$(NOMAD_SHA)"
# don't run this by default in plan/apply because it prevents you from
# updating a running cluster
tidy:
	rm -rf keys
	mkdir keys
	chmod 0700 keys
	rm -rf uploads/*
	git checkout uploads/README.md
	rm -f terraform.tfstate.*.backup
@@ -1,23 +1,46 @@
# Terraform infrastructure

This folder contains Terraform resources for provisioning a Nomad cluster on
EC2 instances on AWS to use as the target of end-to-end tests.
This folder contains Terraform resources for provisioning a Nomad
cluster on EC2 instances on AWS to use as the target of end-to-end
tests.

Terraform provisions the AWS infrastructure assuming that EC2 AMIs have
already been built via Packer. It deploys a specific build of Nomad to the
cluster along with configuration files for Nomad, Consul, and Vault.
Terraform provisions the AWS infrastructure assuming that EC2 AMIs
have already been built via Packer and HCP Consul and HCP Vault
clusters are already running. It deploys a build of Nomad from your
local machine along with configuration files.

## Setup

You'll need Terraform 0.14.7+, as well as AWS credentials to create the Nomad
cluster. This Terraform stack assumes that an appropriate instance role has
been configured elsewhere and that you have the ability to `AssumeRole` into
the AWS account.
You'll need a recent version of Terraform (1.1+ recommended), as well
as AWS credentials to create the Nomad cluster and credentials for
HCP. This Terraform stack assumes that an appropriate instance role
has been configured elsewhere and that you have the ability to
`AssumeRole` into the AWS account.

Optionally, edit the `terraform.tfvars` file to change the number of Linux
clients or Windows clients. The Terraform variables file
`terraform.full.tfvars` is for the nightly E2E test run and deploys a larger,
more diverse set of test targets.
Configure the following environment variables. For HashiCorp Nomad
developers, this configuration can be found in 1Pass in the Nomad
team's vault under `nomad-e2e`.

```
export HCP_CLIENT_ID=
export HCP_CLIENT_SECRET=
export CONSUL_HTTP_TOKEN=
export CONSUL_HTTP_ADDR=
```

The Vault admin token will expire after 6 hours. If you haven't
created one already, use the separate Terraform configuration found in
the `hcp-vault-auth` directory. The following will set the correct
values for `VAULT_TOKEN`, `VAULT_ADDR`, and `VAULT_NAMESPACE`:

```
cd ./hcp-vault-auth
terraform apply --auto-approve
$(terraform output environment --raw)
```

Optionally, edit the `terraform.tfvars` file to change the number of
Linux clients or Windows clients.

```hcl
region                           = "us-east-1"
@@ -25,9 +48,12 @@ instance_type = "t2.medium"
server_count                     = "3"
client_count_ubuntu_bionic_amd64 = "4"
client_count_windows_2016_amd64  = "1"
profile                          = "dev-cluster"
```

Optionally, edit the `nomad_local_binary` variable in the
`terraform.tfvars` file to change the path to the local binary of
Nomad you'd like to upload.

Run Terraform apply to deploy the infrastructure:

```sh
@@ -40,66 +66,23 @@ terraform apply
> where the ssh service isn't yet ready. That's ok and expected; they'll get
> retried. In particular, Windows instances can take a few minutes before ssh
> is ready.
>
> Also note: When ACLs are being bootstrapped, you may see "No cluster
> leader" in the output several times while the ACL bootstrap script
> polls the cluster to start and elect a leader.

## Nomad Version
## Configuration

You'll need to pass one of the following variables in either your
`terraform.tfvars` file or as a command line argument (ex. `terraform apply
-var 'nomad_version=0.10.2+ent'`)
The files in `etc` are template configuration files for Nomad and the
Consul agent. Terraform will render these files to the `uploads`
folder and upload them to the cluster during provisioning.

* `nomad_local_binary`: provision this specific local binary of Nomad. This is
  a path to a Nomad binary on your own host. Ex. `nomad_local_binary =
  "/home/me/nomad"`. This setting overrides `nomad_version`.
* `nomad_url`: provision this version from a remote archived binary, e.g. `build-binaries` CircleCI artifacts zip file urls.
* `nomad_version`: provision this version from
  [releases.hashicorp.com](https://releases.hashicorp.com/nomad). Ex. `nomad_version
  = "0.10.2+ent"`

If you want to deploy the Enterprise build, include `-var
'nomad_enterprise=true'`.

If you want to bootstrap Nomad ACLs, include `-var 'nomad_acls=true'`.

> Note: If you bootstrap ACLs you will see "No cluster leader" in the output
> several times while the ACL bootstrap script polls the cluster to start
> and elect a leader.

## Profiles

The `profile` field selects from a set of configuration files for Nomad,
Consul, and Vault by uploading the files found in `./config/<profile>`. The
standard profiles are as follows:

* `full-cluster`: This profile is used for nightly E2E testing. It assumes at
  least 3 servers and includes a unique config for each Nomad client.
* `dev-cluster`: This profile is used for developer testing of a more limited
  set of clients. It assumes at least 3 servers but uses the one config for
  all the Linux Nomad clients and one config for all the Windows Nomad
  clients.

You may create additional profiles for testing more complex interactions between features.
You can build your own custom profile by writing config files to the
`./config/<custom name>` directory.

For each profile, application (Nomad, Consul, Vault), and agent type
(`server`, `client_linux`, or `client_windows`), the agent gets the following
configuration files, ignoring any that are missing.

* `./config/<profile>/<application>/*`: base configurations shared between all
  servers and clients.
* `./config/<profile>/<application>/<type>/*`: base configurations shared
  between all agents of this type.
* `./config/<profile>/<application>/<type>/indexed/*<index>.<ext>`: a
  configuration for that particular agent, where the index value is the index
  of that agent within the total count.

For example, with the `full-cluster` profile, the 2nd Nomad server would get the
following configuration files:
* `./config/full-cluster/nomad/base.hcl`
* `./config/full-cluster/nomad/server/indexed/server-1.hcl`

The directory `./config/full-cluster/nomad/server` has no configuration files,
so that's safely skipped.
* `etc/nomad.d` are the Nomad configuration files.
  * `base.hcl`, `tls.hcl`, `consul.hcl`, and `vault.hcl` are shared.
  * `server-linux.hcl`, `client-linux.hcl`, and `client-windows.hcl` are role and platform specific.
  * `client-linux-0.hcl`, etc. are specific to individual instances.
* `etc/consul.d` are the Consul agent configuration files.
* `etc/acls` are ACL policy files for Consul and Vault.

## Outputs
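To make the per-instance config idea concrete, here is an illustrative sketch (an assumption, not code from this commit) of uploading each indexed file to its matching client. It reuses the `aws_instance.client_ubuntu_bionic_amd64` and `var.client_count_ubuntu_bionic_amd64` names that appear elsewhere in this diff; the `ubuntu` login user is assumed.

```hcl
# One upload per client: instance N receives client-linux-N.hcl.
resource "null_resource" "upload_indexed_config" {
  count = var.client_count_ubuntu_bionic_amd64

  connection {
    type = "ssh"
    user = "ubuntu" # assumed login user
    host = aws_instance.client_ubuntu_bionic_amd64[count.index].public_ip
  }

  provisioner "file" {
    source      = "${path.root}/etc/nomad.d/client-linux-${count.index}.hcl"
    destination = "/etc/nomad.d/client-linux-${count.index}.hcl"
  }
}
```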
@@ -1,5 +1,5 @@
locals {
  ami_prefix = "nomad-e2e-v2"
  ami_prefix = "nomad-e2e-v3"
}

resource "aws_instance" "server" {

@@ -15,7 +15,6 @@ resource "aws_instance" "server" {
  tags = {
    Name           = "${local.random_name}-server-${count.index}"
    ConsulAutoJoin = "auto-join-${local.random_name}"
    SHA            = var.nomad_sha
    User           = data.aws_caller_identity.current.arn
  }
}

@@ -33,7 +32,6 @@ resource "aws_instance" "client_ubuntu_bionic_amd64" {
  tags = {
    Name           = "${local.random_name}-client-ubuntu-bionic-amd64-${count.index}"
    ConsulAutoJoin = "auto-join-${local.random_name}"
    SHA            = var.nomad_sha
    User           = data.aws_caller_identity.current.arn
  }
}

@@ -53,7 +51,6 @@ resource "aws_instance" "client_windows_2016_amd64" {
  tags = {
    Name           = "${local.random_name}-client-windows-2016-${count.index}"
    ConsulAutoJoin = "auto-join-${local.random_name}"
    SHA            = var.nomad_sha
    User           = data.aws_caller_identity.current.arn
  }
}
e2e/terraform/config/.gitignore | 1 (vendored)

@@ -1 +0,0 @@
custom/*

@@ -1,4 +0,0 @@
{
  "recursors": ["172.31.0.2"],
  "retry_join": ["provider=aws tag_key=ConsulAutoJoin tag_value=auto-join"]
}

@@ -1,15 +0,0 @@
{
  "log_level": "INFO",
  "data_dir": "/opt/consul/data",
  "bind_addr": "0.0.0.0",
  "client_addr": "0.0.0.0",
  "advertise_addr": "{{ GetPrivateIP }}",
  "connect": {
    "enabled": true
  },
  "ports": {
    "http": -1,
    "https": 8501,
    "grpc": 8502
  }
}

@@ -1,8 +0,0 @@
{
  "server": true,
  "ui": true,
  "bootstrap_expect": 3,
  "service": {
    "name": "consul"
  }
}

@@ -1,19 +0,0 @@
enable_debug = true

log_level = "debug"

data_dir = "/opt/nomad/data"

bind_addr = "0.0.0.0"

consul {
  address = "127.0.0.1:8500"
}

telemetry {
  collection_interval        = "1s"
  disable_hostname           = true
  prometheus_metrics         = true
  publish_allocation_metrics = true
  publish_node_metrics       = true
}

@@ -1,51 +0,0 @@
plugin_dir = "/opt/nomad/plugins"

client {
  enabled = true

  options {
    # Allow jobs to run as root
    "user.denylist" = ""
  }

  host_volume "shared_data" {
    path = "/srv/data"
  }
}

plugin "nomad-driver-podman" {
  config {
    volumes {
      enabled = true
    }
  }
}

plugin "nomad-driver-ecs" {
  config {
    enabled = true
    cluster = "nomad-rtd-e2e"
    region  = "us-east-1"
  }
}

plugin "raw_exec" {
  config {
    enabled = true
  }
}

plugin "docker" {
  config {
    allow_privileged = true

    volumes {
      enabled = true
    }
  }
}

vault {
  enabled = true
  address = "http://active.vault.service.consul:8200"
}

@@ -1,36 +0,0 @@
enable_debug = true

log_level = "debug"

log_file = "C:\\opt\\nomad\\nomad.log"

data_dir = "C:\\opt\\nomad\\data"

bind_addr = "0.0.0.0"

# Enable the client
client {
  enabled = true

  options {
    # Allow rawexec jobs
    "driver.raw_exec.enable" = "1"
  }
}

consul {
  address = "127.0.0.1:8500"
}

vault {
  enabled = true
  address = "http://active.vault.service.consul:8200"
}

telemetry {
  collection_interval        = "1s"
  disable_hostname           = true
  prometheus_metrics         = true
  publish_allocation_metrics = true
  publish_node_metrics       = true
}

@@ -1,15 +0,0 @@
listener "tcp" {
  address     = "0.0.0.0:8200"
  tls_disable = 1
}

# this autounseal key is created by Terraform in the E2E infrastructure repo
# and should be used only for these tests
seal "awskms" {
  region     = "us-east-1"
  kms_key_id = "74b7e226-c745-4ddd-9b7f-2371024ee37d"
}

# Vault 1.5.4 doesn't have autodiscovery for retry_join on its
# integrated storage yet so we'll just use consul for storage
storage "consul" {}

@@ -1,4 +0,0 @@
{
  "recursors": ["172.31.0.2"],
  "retry_join": ["provider=aws tag_key=ConsulAutoJoin tag_value=auto-join"]
}

@@ -1,8 +0,0 @@
{
  "server": true,
  "ui": true,
  "bootstrap_expect": 3,
  "service": {
    "name": "consul"
  }
}

@@ -1,50 +0,0 @@
# Enable the client
client {
  enabled = true

  meta {
    "rack" = "r1"
  }

  host_volume "shared_data" {
    path = "/srv/data"
  }
}

plugin_dir = "/opt/nomad/plugins"
plugin "nomad-driver-podman" {
  config {
    volumes {
      enabled = true
    }
  }
}

plugin "nomad-driver-ecs" {
  config {
    enabled = true
    cluster = "nomad-rtd-e2e"
    region  = "us-east-1"
  }
}

plugin "raw_exec" {
  config {
    enabled = true
  }
}

plugin "docker" {
  config {
    allow_privileged = true

    volumes {
      enabled = true
    }
  }
}

vault {
  enabled = true
  address = "http://active.vault.service.consul:8200"
}

@@ -1,39 +0,0 @@
datacenter = "dc2"

client {
  enabled = true

  meta {
    "rack" = "r1"
  }
}

plugin_dir = "/opt/nomad/plugins"
plugin "nomad-driver-podman" {
  config {
    volumes {
      enabled = true
    }
  }
}

plugin "raw_exec" {
  config {
    enabled = true
  }
}

plugin "docker" {
  config {
    allow_privileged = true

    volumes {
      enabled = true
    }
  }
}

vault {
  enabled = true
  address = "http://active.vault.service.consul:8200"
}

@@ -1,30 +0,0 @@
datacenter = "dc2"

client {
  enabled = true

  meta {
    "rack" = "r2"
  }
}

plugin "raw_exec" {
  config {
    enabled = true
  }
}

plugin "docker" {
  config {
    allow_privileged = true

    volumes {
      enabled = true
    }
  }
}

vault {
  enabled = true
  address = "http://active.vault.service.consul:8200"
}

@@ -1,36 +0,0 @@
enable_debug = true

log_level = "debug"

log_file = "C:\\opt\\nomad\\nomad.log"

data_dir = "C:\\opt\\nomad\\data"

bind_addr = "0.0.0.0"

# Enable the client
client {
  enabled = true

  options {
    # Allow rawexec jobs
    "driver.raw_exec.enable" = "1"
  }
}

consul {
  address = "127.0.0.1:8500"
}

vault {
  enabled = true
  address = "http://active.vault.service.consul:8200"
}

telemetry {
  collection_interval        = "1s"
  disable_hostname           = true
  prometheus_metrics         = true
  publish_allocation_metrics = true
  publish_node_metrics       = true
}

@@ -1,4 +0,0 @@
server {
  enabled          = true
  bootstrap_expect = 3
}

@@ -1,15 +0,0 @@
listener "tcp" {
  address     = "0.0.0.0:8200"
  tls_disable = 1
}

# this autounseal key is created by Terraform in the E2E infrastructure repo
# and should be used only for these tests
seal "awskms" {
  region     = "us-east-1"
  kms_key_id = "74b7e226-c745-4ddd-9b7f-2371024ee37d"
}

# Vault 1.5.4 doesn't have autodiscovery for retry_join on its
# integrated storage yet so we'll just use consul for storage
storage "consul" {}

@@ -1,5 +0,0 @@
### Shared configs

The only configurations that should go here are ones that we want to be able
to toggle on/off for any profile. Adding a new configuration here requires
adding a flag to the provision scripts as well to symlink it.

@@ -1,11 +0,0 @@
{
  "verify_incoming": true,
  "verify_outgoing": true,
  "verify_server_hostname": true,
  "ca_file": "/etc/consul.d/tls/ca.crt",
  "cert_file": "/etc/consul.d/tls/agent.crt",
  "key_file": "/etc/consul.d/tls/agent.key",
  "ports": {
    "https": 8501
  }
}

@@ -1,3 +0,0 @@
acl {
  enabled = true
}

@@ -1,29 +0,0 @@
tls {
  http = true
  rpc  = true

  ca_file   = "/etc/nomad.d/tls/ca.crt"
  cert_file = "/etc/nomad.d/tls/agent.crt"
  key_file  = "/etc/nomad.d/tls/agent.key"

  verify_server_hostname = true
  verify_https_client    = true
}

consul {
  address = "127.0.0.1:8501"
  ssl     = true

  ca_file   = "/etc/nomad.d/tls/ca.crt"
  cert_file = "/etc/nomad.d/tls/agent.crt"
  key_file  = "/etc/nomad.d/tls/agent.key"
}

vault {
  enabled = true
  address = "https://active.vault.service.consul:8200"

  ca_file   = "/etc/nomad.d/tls/ca.crt"
  cert_file = "/etc/nomad.d/tls/agent.crt"
  key_file  = "/etc/nomad.d/tls/agent.key"
}

@@ -1,26 +0,0 @@
listener "tcp" {
  address = "0.0.0.0:8200"

  tls_disable                        = false
  tls_require_and_verify_client_cert = true

  tls_client_ca_file = "/etc/vault.d/tls/ca.crt"
  tls_cert_file      = "/etc/vault.d/tls/agent.crt"
  tls_key_file       = "/etc/vault.d/tls/agent.key"
}

# this autounseal key is created by Terraform in the E2E infrastructure repo
# and should be used only for these tests
seal "awskms" {
  region     = "us-east-1"
  kms_key_id = "74b7e226-c745-4ddd-9b7f-2371024ee37d"
}

storage "consul" {
  address = "127.0.0.1:8501"
  scheme  = "https"

  tls_ca_file   = "/etc/vault.d/tls/ca.crt"
  tls_cert_file = "/etc/vault.d/tls/agent.crt"
  tls_key_file  = "/etc/vault.d/tls/agent.key"
}
e2e/terraform/etc/acls/consul/consul-agent-policy.hcl | 32 (new file)

@@ -0,0 +1,32 @@
# TODO: because Nomad should own most of these interactions, I think
# it might be possible to reduce this to:
#
# node_prefix "" {
#   policy = write
# }

acl = "write"

agent_prefix "" {
  policy = "write"
}

event_prefix "" {
  policy = "write"
}

key_prefix "" {
  policy = "write"
}

node_prefix "" {
  policy = "write"
}

query_prefix "" {
  policy = "write"
}

service_prefix "" {
  policy = "write"
}
e2e/terraform/etc/acls/consul/nomad-client-policy.hcl | 31 (new file)

@@ -0,0 +1,31 @@
// The Nomad Client will be registering things into its buddy Consul Client.
// Note: because we also test the use of Consul namespaces, this token must be
// able to register services, read the keystore, and read node data for any
// namespace.
// The operator=write permission is required for creating config entries for
// connect ingress gateways. operator ACLs are not namespaced, though the
// config entries they can generate are.
operator = "write"

agent_prefix "" {
  policy = "read"
}

namespace_prefix "" {
  // The acl=write permission is required for generating Consul Service Identity
  // tokens for consul connect services. Those services could be configured for
  // any Consul namespace the job-submitter has access to.
  acl = "write"

  key_prefix "" {
    policy = "read"
  }

  node_prefix "" {
    policy = "read"
  }

  service_prefix "" {
    policy = "write"
  }
}
e2e/terraform/etc/acls/consul/nomad-server-policy.hcl | 27 (new file)

@@ -0,0 +1,27 @@
// The operator=write permission is required for creating config entries for
// connect ingress gateways. operator ACLs are not namespaced, though the
// config entries they can generate are.
operator = "write"

agent_prefix "" {
  policy = "read"
}

namespace_prefix "" {
  // The acl=write permission is required for generating Consul Service Identity
  // tokens for consul connect services. Those services could be configured for
  // any Consul namespace the job-submitter has access to.
  acl = "write"
}

service_prefix "" {
  policy = "write"
}

agent_prefix "" {
  policy = "read"
}

node_prefix "" {
  policy = "read"
}
@@ -1,12 +1,12 @@
# Allow creating tokens under "nomad-cluster" role. The role name should be
# updated if "nomad-cluster" is not used.
path "auth/token/create/nomad-cluster" {
# updated if "nomad-tasks" is not used.
path "auth/token/create/nomad-tasks" {
  capabilities = ["update"]
}

# Allow looking up "nomad-cluster" role. The role name should be updated if
# "nomad-cluster" is not used.
path "auth/token/roles/nomad-cluster" {
# Allow looking up "nomad-tasks" role. The role name should be updated if
# "nomad-tasks" is not used.
path "auth/token/roles/nomad-tasks" {
  capabilities = ["read"]
}
e2e/terraform/etc/consul.d/client_acl.json | 8 (new file)

@@ -0,0 +1,8 @@
{
  "acl": {
    "tokens": {
      "agent": "${token}",
      "default": "${token}"
    }
  }
}
@@ -1,5 +1,4 @@
{
  "log_level": "INFO",
  "data_dir": "/opt/consul/data",
  "bind_addr": "0.0.0.0",
  "client_addr": "0.0.0.0",
e2e/terraform/etc/consul.d/consul.service | 17 (new file)

@@ -0,0 +1,17 @@
[Unit]
Description=Consul Agent
Requires=network-online.target
After=network-online.target

[Service]
Restart=on-failure
Environment=CONSUL_ALLOW_PRIVILEGED_PORTS=true
WorkingDirectory=/etc/consul.d
ExecStart=/usr/bin/consul agent -config-dir="/etc/consul.d"
ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGTERM
User=consul
Group=consul

[Install]
WantedBy=multi-user.target
e2e/terraform/etc/nomad.d/.environment | 1 (new file)

@@ -0,0 +1 @@
NOMAD_LICENSE=${license}
@@ -1,14 +1,7 @@
bind_addr    = "0.0.0.0"
data_dir     = "${data_dir}"
enable_debug = true

log_level = "debug"

data_dir = "/opt/nomad/data"

bind_addr = "0.0.0.0"

consul {
  address = "127.0.0.1:8500"
}
log_level = "debug"

audit {
  enabled = true
e2e/terraform/etc/nomad.d/client-linux-0.hcl | 9 (new file)

@@ -0,0 +1,9 @@
client {
  meta {
    "rack" = "r1"
  }

  host_volume "shared_data" {
    path = "/srv/data"
  }
}
e2e/terraform/etc/nomad.d/client-linux-1.hcl | 5 (new file)

@@ -0,0 +1,5 @@
client {
  meta {
    "rack" = "r2"
  }
}
e2e/terraform/etc/nomad.d/client-linux-2.hcl | 7 (new file)

@@ -0,0 +1,7 @@
datacenter = "dc2"

client {
  meta {
    "rack" = "r1"
  }
}
e2e/terraform/etc/nomad.d/client-linux-3.hcl | 7 (new file)

@@ -0,0 +1,7 @@
datacenter = "dc2"

client {
  meta {
    "rack" = "r2"
  }
}
@@ -1,12 +1,9 @@
plugin_dir = "/opt/nomad/plugins"

client {
  enabled = true

  meta {
    "rack" = "r2"
  }
}

plugin_dir = "/opt/nomad/plugins"
plugin "nomad-driver-podman" {
  config {
    volumes {

@@ -38,8 +35,3 @@ plugin "docker" {
    }
  }
}

vault {
  enabled = true
  address = "http://active.vault.service.consul:8200"
}
e2e/terraform/etc/nomad.d/client-windows.hcl | 12 (new file)

@@ -0,0 +1,12 @@
log_file   = "C:\\opt\\nomad\\nomad.log"
plugin_dir = "C:\\opt\\nomad\\plugins"

client {
  enabled = true
}

plugin "raw_exec" {
  config {
    enabled = true
  }
}
e2e/terraform/etc/nomad.d/consul.hcl | 6 (new file)

@@ -0,0 +1,6 @@
consul {
  address             = "127.0.0.1:8500"
  token               = "${token}"
  client_service_name = "${client_service_name}"
  server_service_name = "${server_service_name}"
}
e2e/terraform/etc/nomad.d/index.hcl | 1 (new file)

@@ -0,0 +1 @@
# This is an empty placeholder for indexed configuration
e2e/terraform/etc/nomad.d/nomad-client.service | 21 (new file)

@@ -0,0 +1,21 @@
[Unit]
Description=Nomad Agent
Requires=network-online.target
After=network-online.target
StartLimitIntervalSec=0
StartLimitBurst=3

[Service]
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nomad agent -config /etc/nomad.d
EnvironmentFile=-/etc/nomad.d/.environment
KillMode=process
KillSignal=SIGINT
LimitNOFILE=65536
LimitNPROC=infinity
TasksMax=infinity
Restart=on-failure
RestartSec=2

[Install]
WantedBy=multi-user.target
e2e/terraform/etc/nomad.d/nomad-server.service | 21 (new file)

@@ -0,0 +1,21 @@
[Unit]
Description=Nomad Agent
Requires=network-online.target
After=network-online.target
StartLimitIntervalSec=0
StartLimitBurst=3

[Service]
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nomad agent -config /etc/nomad.d
EnvironmentFile=-/etc/nomad.d/.environment
KillMode=process
KillSignal=SIGINT
LimitNOFILE=65536
LimitNPROC=infinity
TasksMax=infinity
Restart=on-failure
RestartSec=2

[Install]
WantedBy=multi-user.target
@@ -2,3 +2,7 @@ server {
  enabled          = true
  bootstrap_expect = 3
}

acl {
  enabled = true
}
e2e/terraform/etc/nomad.d/tls.hcl | 11 (new file)

@@ -0,0 +1,11 @@
tls {
  http = true
  rpc  = true

  ca_file   = "/etc/nomad.d/tls/ca.crt"
  cert_file = "/etc/nomad.d/tls/agent.crt"
  key_file  = "/etc/nomad.d/tls/agent.key"

  verify_server_hostname = true
  verify_https_client    = true
}
e2e/terraform/etc/nomad.d/vault.hcl | 8 (new file)

@@ -0,0 +1,8 @@
vault {
  enabled          = true
  address          = "${url}"
  task_token_ttl   = "1h"
  create_from_role = "nomad-tasks"
  namespace        = "${namespace}"
  token            = "${token}"
}
e2e/terraform/hcp-vault-auth/main.tf | 47 (new file)

@@ -0,0 +1,47 @@
# Vault cluster admin tokens expire after 6 hours, so we need to
# generate them fresh for test runs. But we can't generate the token
# and then use that token with the vault provider in the same
# Terraform run. So you'll need to apply this TF config separately
# from the root configuration.

variable "hcp_vault_cluster_id" {
  description = "The ID of the HCP Vault cluster"
  type        = string
  default     = "nomad-e2e-shared-hcp-vault"
}

variable "hcp_vault_namespace" {
  description = "The namespace where the HCP Vault cluster policy works"
  type        = string
  default     = "admin"
}

data "hcp_vault_cluster" "e2e_shared_vault" {
  cluster_id = var.hcp_vault_cluster_id
}

resource "hcp_vault_cluster_admin_token" "admin" {
  cluster_id = data.hcp_vault_cluster.e2e_shared_vault.cluster_id
}

output "message" {
  value = <<EOM
Your cluster admin token has been provisioned! To prepare the test runner
environment, run:

   $(terraform output --raw environment)

EOM
}

output "environment" {
  description = "get connection config by running: $(terraform output environment)"
  sensitive   = true
  value       = <<EOM
export VAULT_TOKEN=${hcp_vault_cluster_admin_token.admin.token}
export VAULT_NAMESPACE=${var.hcp_vault_namespace}
export VAULT_ADDR=${data.hcp_vault_cluster.e2e_shared_vault.vault_public_endpoint_url}

EOM
}
e2e/terraform/hcp_consul.tf | 127 (new file)

@@ -0,0 +1,127 @@
# Note: the test environment must have the following values set:
# export HCP_CLIENT_ID=
# export HCP_CLIENT_SECRET=
# export CONSUL_HTTP_TOKEN=
# export CONSUL_HTTP_ADDR=

data "hcp_consul_cluster" "e2e_shared_consul" {
  cluster_id = var.hcp_consul_cluster_id
}

# policy and configuration for the Consul Agent

resource "consul_acl_policy" "consul_agent" {
  name        = "consul_agent_policy"
  datacenters = [var.hcp_consul_cluster_id]
  rules       = data.local_file.consul_policy_for_consul_agent.content
}

data "local_file" "consul_policy_for_consul_agent" {
  filename = "${path.root}/etc/acls/consul/consul-agent-policy.hcl"
}

resource "consul_acl_token" "consul_agent_token" {
  description = "Consul agent token"
  policies    = [consul_acl_policy.consul_agent.name]
  local       = true
}

data "consul_acl_token_secret_id" "consul_agent_token" {
  accessor_id = consul_acl_token.consul_agent_token.id
}

resource "local_file" "consul_acl_file" {
  sensitive_content = templatefile("etc/consul.d/client_acl.json", {
    token = data.consul_acl_token_secret_id.consul_agent_token.secret_id
  })
  filename        = "uploads/shared/consul.d/client_acl.json"
  file_permission = "0600"
}

resource "local_file" "consul_ca_file" {
  sensitive_content = base64decode(data.hcp_consul_cluster.e2e_shared_consul.consul_ca_file)
  filename          = "uploads/shared/consul.d/ca.pem"
  file_permission   = "0600"
}

resource "local_file" "consul_config_file" {
  sensitive_content = base64decode(data.hcp_consul_cluster.e2e_shared_consul.consul_config_file)
  filename          = "uploads/shared/consul.d/consul_client.json"
  file_permission   = "0744"
}

resource "local_file" "consul_base_config_file" {
  sensitive_content = templatefile("${path.root}/etc/consul.d/clients.json", {})
  filename          = "uploads/shared/consul.d/consul_client_base.json"
  file_permission   = "0744"
}

resource "local_file" "consul_systemd_unit_file" {
  sensitive_content = templatefile("${path.root}/etc/consul.d/consul.service", {})
  filename          = "uploads/shared/consul.d/consul.service"
  file_permission   = "0744"
}

# Nomad servers configuration for Consul

resource "consul_acl_policy" "nomad_servers" {
  name        = "nomad_server_policy"
  datacenters = [var.hcp_consul_cluster_id]
  rules       = data.local_file.consul_policy_for_nomad_server.content
}

data "local_file" "consul_policy_for_nomad_server" {
  filename = "${path.root}/etc/acls/consul/nomad-server-policy.hcl"
}

resource "consul_acl_token" "nomad_servers_token" {
  description = "Nomad servers token"
  policies    = [consul_acl_policy.nomad_servers.name]
  local       = true
}

data "consul_acl_token_secret_id" "nomad_servers_token" {
  accessor_id = consul_acl_token.nomad_servers_token.id
}

resource "local_file" "nomad_server_config_for_consul" {
  sensitive_content = templatefile("etc/nomad.d/consul.hcl", {
    token               = data.consul_acl_token_secret_id.nomad_servers_token.secret_id
    client_service_name = "client-${local.random_name}"
    server_service_name = "server-${local.random_name}"
  })
  filename        = "uploads/shared/nomad.d/server-consul.hcl"
  file_permission = "0600"
}

# Nomad clients configuration for Consul

resource "consul_acl_policy" "nomad_clients" {
  name        = "nomad_client_policy"
  datacenters = [var.hcp_consul_cluster_id]
  rules       = data.local_file.consul_policy_for_nomad_clients.content
}

data "local_file" "consul_policy_for_nomad_clients" {
  filename = "${path.root}/etc/acls/consul/nomad-client-policy.hcl"
}

resource "consul_acl_token" "nomad_clients_token" {
  description = "Nomad clients token"
  policies    = [consul_acl_policy.nomad_clients.name]
  local       = true
}

data "consul_acl_token_secret_id" "nomad_clients_token" {
  accessor_id = consul_acl_token.nomad_clients_token.id
}

resource "local_file" "nomad_client_config_for_consul" {
  sensitive_content = templatefile("etc/nomad.d/consul.hcl", {
    token               = data.consul_acl_token_secret_id.nomad_clients_token.secret_id
    client_service_name = "client-${local.random_name}"
    server_service_name = "server-${local.random_name}"
  })
  filename        = "uploads/shared/nomad.d/client-consul.hcl"
  file_permission = "0600"
}
e2e/terraform/hcp_vault.tf | 49 (new file)

@@ -0,0 +1,49 @@
# Note: the test environment must have the following values set:
# export HCP_CLIENT_ID=
# export HCP_CLIENT_SECRET=
# export VAULT_TOKEN=
# export VAULT_ADDR=

data "hcp_vault_cluster" "e2e_shared_vault" {
  cluster_id = var.hcp_vault_cluster_id
}

# Nomad servers configuration for Vault

resource "vault_policy" "nomad" {
  name   = "nomad-server"
  policy = data.local_file.vault_policy_for_nomad.content
}

data "local_file" "vault_policy_for_nomad" {
  filename = "${path.root}/etc/acls/vault/nomad-policy.hcl"
}

resource "vault_token" "nomad" {
  policies  = [vault_policy.nomad.name]
  no_parent = true
  renewable = true
  ttl       = "72h"
}

# this is the role that Nomad will use for derived tokens. It's not
# allowed access to nomad-policy so that it can only mint tokens for
# tasks, not for new clusters
resource "vault_token_auth_backend_role" "nomad_cluster" {
  role_name           = "nomad-tasks"
  disallowed_policies = [vault_policy.nomad.name]
  orphan              = true
  token_period        = "259200"
  renewable           = true
  token_max_ttl       = "0"
}

resource "local_file" "nomad_config_for_vault" {
  sensitive_content = templatefile("etc/nomad.d/vault.hcl", {
    token     = vault_token.nomad.client_token
    url       = data.hcp_vault_cluster.e2e_shared_vault.vault_private_endpoint_url
    namespace = var.hcp_vault_namespace
  })
  filename        = "uploads/shared/nomad.d/vault.hcl"
  file_permission = "0600"
}
@@ -6,7 +6,7 @@
# that we read in for the output of $(terraform output environment) later.

locals {
  nomad_env = var.tls ? "NOMAD_ADDR=https://${aws_instance.server.0.public_ip}:4646 NOMAD_CACERT=keys/tls_ca.crt NOMAD_CLIENT_CERT=keys/tls_api_client.crt NOMAD_CLIENT_KEY=keys/tls_api_client.key" : "NOMAD_ADDR=http://${aws_instance.server.0.public_ip}:4646"
  nomad_env = "NOMAD_ADDR=https://${aws_instance.server.0.public_ip}:4646 NOMAD_CACERT=keys/tls_ca.crt NOMAD_CLIENT_CERT=keys/tls_api_client.crt NOMAD_CLIENT_KEY=keys/tls_api_client.key"
}

resource "null_resource" "bootstrap_nomad_acls" {
@@ -24,7 +24,7 @@ resource "null_resource" "bootstrap_nomad_acls" {
# so that we can read it into the data.local_file later. If not set,
# ensure that it's empty.
data "template_file" "bootstrap_nomad_script" {
  template = var.nomad_acls ? "${local.nomad_env} ./scripts/bootstrap-nomad.sh" : "mkdir -p ${path.root}/keys; echo > ${path.root}/keys/nomad_root_token"
  template = "${local.nomad_env} ./scripts/bootstrap-nomad.sh"
}

data "local_file" "nomad_token" {
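The data "local_file" "nomad_token" block is truncated in this hunk. Since bootstrap-nomad.sh (and, before its removal, the fallback branch above) writes the token to ${path.root}/keys/nomad_root_token, the full block plausibly just reads that file back; the depends_on is an assumption:

data "local_file" "nomad_token" {
  depends_on = [null_resource.bootstrap_nomad_acls]
  filename   = "${path.root}/keys/nomad_root_token"
}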
@@ -1,36 +1,19 @@
module "nomad_server" {

  source     = "./provision-nomad"
  depends_on = [aws_instance.server]
  count      = var.server_count

  platform = "linux_amd64"
  profile  = var.profile
  platform = "linux"
  arch     = "linux_amd64"
  role     = "server"
  index    = count.index

  # The specific version of Nomad deployed will default to whichever one of
  # nomad_sha, nomad_version, or nomad_local_binary is set, but if you want to
  # deploy multiple versions you can use the nomad_*_server variables to
  # provide a list of builds
  nomad_version = count.index < length(var.nomad_version_server) ? var.nomad_version_server[count.index] : var.nomad_version

  nomad_sha = count.index < length(var.nomad_sha_server) ? var.nomad_sha_server[count.index] : var.nomad_sha
  instance  = aws_instance.server[count.index]

  nomad_local_binary = count.index < length(var.nomad_local_binary_server) ? var.nomad_local_binary_server[count.index] : var.nomad_local_binary

  nomad_url = count.index < length(var.nomad_url_server) ? var.nomad_url_server[count.index] : var.nomad_url

  nomad_enterprise = var.nomad_enterprise
  nomad_license    = var.nomad_license
  nomad_acls       = var.nomad_acls
  cluster_name     = local.random_name

  tls         = var.tls
  tls_ca_key  = tls_private_key.ca.private_key_pem
  tls_ca_cert = tls_self_signed_cert.ca.cert_pem

  instance      = aws_instance.server[count.index]
  nomad_license = var.nomad_license
  tls_ca_key    = tls_private_key.ca.private_key_pem
  tls_ca_cert   = tls_self_signed_cert.ca.cert_pem

  connection = {
    type = "ssh"
@@ -43,38 +26,21 @@ module "nomad_server" {
# TODO: split out the different Linux targets (ubuntu, centos, arm, etc.) when
# they're available
module "nomad_client_ubuntu_bionic_amd64" {

  source     = "./provision-nomad"
  depends_on = [aws_instance.client_ubuntu_bionic_amd64]
  count      = var.client_count_ubuntu_bionic_amd64

  platform = "linux_amd64"
  profile  = var.profile
  role     = "client-linux"
  platform = "linux"
  arch     = "linux_amd64"
  role     = "client"
  index    = count.index

  # The specific version of Nomad deployed will default to whichever one of
  # nomad_sha, nomad_version, or nomad_local_binary is set, but if you want to
  # deploy multiple versions you can use the nomad_*_client_linux
  # variables to provide a list of builds
  nomad_version = count.index < length(var.nomad_version_client_ubuntu_bionic_amd64) ? var.nomad_version_client_ubuntu_bionic_amd64[count.index] : var.nomad_version

  nomad_sha = count.index < length(var.nomad_sha_client_ubuntu_bionic_amd64) ? var.nomad_sha_client_ubuntu_bionic_amd64[count.index] : var.nomad_sha
  instance  = aws_instance.client_ubuntu_bionic_amd64[count.index]

  nomad_local_binary = count.index < length(var.nomad_local_binary_client_ubuntu_bionic_amd64) ? var.nomad_local_binary_client_ubuntu_bionic_amd64[count.index] : var.nomad_local_binary

  nomad_url = count.index < length(var.nomad_url_client_ubuntu_bionic_amd64) ? var.nomad_url_client_ubuntu_bionic_amd64[count.index] : var.nomad_url

  nomad_enterprise = var.nomad_enterprise
  nomad_acls       = false
  cluster_name     = local.random_name

  tls         = var.tls
  tls_ca_key  = tls_private_key.ca.private_key_pem
  tls_ca_cert = tls_self_signed_cert.ca.cert_pem

  instance = aws_instance.client_ubuntu_bionic_amd64[count.index]

  connection = {
    type = "ssh"
    user = "ubuntu"
@@ -83,44 +49,25 @@ module "nomad_client_ubuntu_bionic_amd64" {
  }
}


# TODO: split out the different Windows targets (2016, 2019) when they're
# available
module "nomad_client_windows_2016_amd64" {

  source     = "./provision-nomad"
  depends_on = [aws_instance.client_windows_2016_amd64]
  count      = var.client_count_windows_2016_amd64

  platform = "windows_amd64"
  profile  = var.profile
  role     = "client-windows"
  platform = "windows"
  arch     = "windows_amd64"
  role     = "client"
  index    = count.index
  instance = aws_instance.client_windows_2016_amd64[count.index]

  # The specific version of Nomad deployed will default to whichever one of
  # nomad_sha, nomad_version, or nomad_local_binary is set, but if you want to
  # deploy multiple versions you can use the nomad_*_client_windows
  # variables to provide a list of builds
  nomad_version = count.index < length(var.nomad_version_client_windows_2016_amd64) ? var.nomad_version_client_windows_2016_amd64[count.index] : var.nomad_version

  nomad_sha = count.index < length(var.nomad_sha_client_windows_2016_amd64) ? var.nomad_sha_client_windows_2016_amd64[count.index] : var.nomad_sha

  # if nomad_local_binary is in use, you must pass a nomad_local_binary_client_windows_2016_amd64!
  nomad_local_binary = count.index < length(var.nomad_local_binary_client_windows_2016_amd64) ? var.nomad_local_binary_client_windows_2016_amd64[count.index] : ""

  # if nomad_url is in use, you must pass a nomad_url_client_windows_2016_amd64!
  nomad_url = count.index < length(var.nomad_url_client_windows_2016_amd64) ? var.nomad_url_client_windows_2016_amd64[count.index] : ""

  nomad_enterprise = var.nomad_enterprise
  nomad_acls       = false
  cluster_name     = local.random_name


  tls         = var.tls
  tls_ca_key  = tls_private_key.ca.private_key_pem
  tls_ca_cert = tls_self_signed_cert.ca.cert_pem

  instance = aws_instance.client_windows_2016_amd64[count.index]

  connection = {
    type = "ssh"
    user = "Administrator"
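As a usage sketch of the list-variable convention described in the comments above (all values hypothetical): deploying three servers where the first runs a newer release than the other two could look like this in terraform.tfvars:

server_count         = 3
nomad_version        = "1.2.5"
nomad_version_server = ["1.2.6"]

With only one entry in nomad_version_server, the servers at index 1 and 2 fall through the ternary to nomad_version.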
@@ -38,34 +38,19 @@ ssh into clients with:
EOM
}

# Note: Consul and Vault environment needs to be set in test
# environment before the Terraform run, so we don't have that output
# here
output "environment" {
  description = "get connection config by running: $(terraform output environment)"
  sensitive   = true
  value       = <<EOM
%{if var.tls}
export NOMAD_ADDR=https://${aws_instance.server[0].public_ip}:4646
export CONSUL_HTTP_ADDR=https://${aws_instance.server[0].public_ip}:8501
export VAULT_ADDR=https://${aws_instance.server[0].public_ip}:8200

export NOMAD_CACERT=${abspath(path.root)}/keys/tls_ca.crt
export CONSUL_CACERT=${abspath(path.root)}/keys/tls_ca.crt
export VAULT_CACERT=${abspath(path.root)}/keys/tls_ca.crt

export NOMAD_CLIENT_CERT=${abspath(path.root)}/keys/tls_api_client.crt
export CONSUL_CLIENT_CERT=${abspath(path.root)}/keys/tls_api_client.crt
export VAULT_CLIENT_CERT=${abspath(path.root)}/keys/tls_api_client.crt

export NOMAD_CLIENT_KEY=${abspath(path.root)}/keys/tls_api_client.key
export CONSUL_CLIENT_KEY=${abspath(path.root)}/keys/tls_api_client.key
export VAULT_CLIENT_KEY=${abspath(path.root)}/keys/tls_api_client.key
%{else}
export NOMAD_ADDR=http://${aws_instance.server[0].public_ip}:4646
export CONSUL_HTTP_ADDR=http://${aws_instance.server[0].public_ip}:8500
export VAULT_ADDR=http://${aws_instance.server[0].public_ip}:8200
%{endif}

export NOMAD_E2E=1
export NOMAD_TOKEN=${data.local_file.nomad_token.content}
export VAULT_TOKEN=${data.local_file.vault_token.content}
export NOMAD_E2E=1

EOM
}
||||
@@ -6,7 +6,7 @@ variable "build_sha" {

locals {
  timestamp = regex_replace(timestamp(), "[- TZ:]", "")
  distro    = "ubuntu-bionic-18.04-amd64-server-*"
  version   = "v2"
  version   = "v3"
}

source "amazon-ebs" "latest_ubuntu_bionic" {
@@ -1,256 +0,0 @@
#!/bin/bash

set -o errexit
set -o nounset
set +x

usage() {
    cat <<EOF
Usage: provision.sh [options...]
Options (use one of the following):
 --nomad_sha SHA          full git sha to install from S3
 --nomad_version VERSION  release version number (ex. 0.12.4+ent)
 --nomad_binary FILEPATH  path to file on host
 --nomad_url URL          url to nomad binary archive

Options for configuration:
 --config_profile FILEPATH  path to config profile directory
 --role ROLE                role within config profile directory
 --index INDEX              count of instance, for profiles with per-instance config
 --nostart                  do not start or restart Nomad
 --enterprise               if nomad_sha is passed, use the ENT version
 --nomad_license            set the NOMAD_LICENSE environment variable
 --nomad_acls               write Nomad ACL configuration
 --autojoin                 the AWS ConsulAutoJoin tag value
 --tls
 --cert FILEPATH
 --key FILEPATH

EOF

    exit 2
}


INSTALL_DIR=/usr/local/bin
INSTALL_PATH="${INSTALL_DIR}/nomad"
PLATFORM=linux_amd64
START=1
install_fn=

NOMAD_PROFILE=
NOMAD_ROLE=
NOMAD_INDEX=
BUILD_FOLDER="builds-oss"
CONSUL_AUTOJOIN=
ACLS=0
NOMAD_LICENSE=
TLS=0

install_from_s3() {
    # check that we don't already have this version
    if [ "$(command -v nomad)" ]; then
        nomad -version | grep -q "${NOMAD_SHA}" \
            && echo "$NOMAD_SHA already installed" && return
    fi

    S3_URL="s3://nomad-team-dev-test-binaries/${BUILD_FOLDER}/nomad_${PLATFORM}_${NOMAD_SHA}.tar.gz"
    aws s3 cp --quiet "$S3_URL" nomad.tar.gz
    sudo tar -zxvf nomad.tar.gz -C "$INSTALL_DIR"
    set_ownership
}

install_from_uploaded_binary() {
    # we don't need to check for reinstallation here because we do it at the
    # user's end so that we're not copying it up if we don't have to
    sudo cp "$NOMAD_UPLOADED_BINARY" "$INSTALL_PATH"
    set_ownership
}

install_from_release() {
    # check that we don't already have this version
    if [ "$(command -v nomad)" ]; then
        nomad -version | grep -v 'dev' | grep -q "${NOMAD_VERSION}" \
            && echo "$NOMAD_VERSION already installed" && return
    fi

    RELEASE_URL="https://releases.hashicorp.com/nomad/${NOMAD_VERSION}/nomad_${NOMAD_VERSION}_${PLATFORM}.zip"
    curl -sL --fail -o /tmp/nomad.zip "$RELEASE_URL"
    sudo unzip -o /tmp/nomad.zip -d "$INSTALL_DIR"
    set_ownership
}

install_from_url() {
    case "${NOMAD_URL}" in
        *.zip*)
            curl -sL --fail -o /tmp/nomad.zip "$NOMAD_URL"
            sudo unzip -o /tmp/nomad.zip -d "$INSTALL_DIR"
            ;;
        *.tar.gz*)
            # download to a matching filename so the tar step below finds it
            curl -sL --fail -o /tmp/nomad.tar.gz "$NOMAD_URL"
            sudo tar -zxvf /tmp/nomad.tar.gz -C "$INSTALL_DIR"
            ;;
    esac
    set_ownership
}

set_ownership() {
    sudo chmod 0755 "$INSTALL_PATH"
    sudo chown root:root "$INSTALL_PATH"
}

sym() {
    find "$1" -maxdepth 1 -type f -name "$2" 2>/dev/null \
        | sudo xargs -I % ln -fs % "$3"
}

install_config_profile() {

    if [ -d /tmp/custom ]; then
        rm -rf /opt/config/custom
        sudo mv /tmp/custom /opt/config/
    fi

    # we're removing the whole directory and recreating to avoid
    # any quirks around dotfiles that might show up here.
    sudo rm -rf /etc/nomad.d
    sudo rm -rf /etc/consul.d
    sudo rm -rf /etc/vault.d

    sudo mkdir -p /etc/nomad.d
    sudo mkdir -p /etc/consul.d
    sudo mkdir -p /etc/vault.d

    sym "${NOMAD_PROFILE}/nomad/" '*' /etc/nomad.d
    sym "${NOMAD_PROFILE}/consul/" '*' /etc/consul.d
    sym "${NOMAD_PROFILE}/vault/" '*' /etc/vault.d

    if [ -n "$NOMAD_ROLE" ]; then
        sym "${NOMAD_PROFILE}/nomad/${NOMAD_ROLE}/" '*' /etc/nomad.d
        sym "${NOMAD_PROFILE}/consul/${NOMAD_ROLE}/" '*' /etc/consul.d
        sym "${NOMAD_PROFILE}/vault/${NOMAD_ROLE}/" '*' /etc/vault.d
    fi
    if [ -n "$NOMAD_INDEX" ]; then
        sym "${NOMAD_PROFILE}/nomad/${NOMAD_ROLE}/indexed/" "*${NOMAD_INDEX}*" /etc/nomad.d
        sym "${NOMAD_PROFILE}/consul/${NOMAD_ROLE}/indexed/" "*${NOMAD_INDEX}*" /etc/consul.d
        sym "${NOMAD_PROFILE}/vault/${NOMAD_ROLE}/indexed/" "*${NOMAD_INDEX}*" /etc/vault.d
    fi

    if [ $ACLS == "1" ]; then
        sudo ln -fs /opt/config/shared/nomad-acl.hcl /etc/nomad.d/acl.hcl
    fi

    if [ $TLS == "1" ]; then
        sudo ln -fs /opt/config/shared/nomad-tls.hcl /etc/nomad.d/tls.hcl
        sudo ln -fs /opt/config/shared/consul-tls.json /etc/consul.d/tls.json
        sudo cp /opt/config/shared/vault-tls.hcl /etc/vault.d/vault.hcl

        sudo cp -r /tmp/nomad-tls /etc/nomad.d/tls
        sudo cp -r /tmp/nomad-tls /etc/consul.d/tls
        sudo cp -r /tmp/nomad-tls /etc/vault.d/tls
    fi
}

update_consul_autojoin() {
    sudo sed -i'' -e "s|tag_key=ConsulAutoJoin tag_value=auto-join|tag_key=ConsulAutoJoin tag_value=${CONSUL_AUTOJOIN}|g" /etc/consul.d/*.json
}

while [[ $# -gt 0 ]]
do
    opt="$1"
    case $opt in
        --nomad_sha)
            if [ -z "$2" ]; then echo "Missing sha parameter"; usage; fi
            NOMAD_SHA="$2"
            install_fn=install_from_s3
            shift 2
            ;;
        --nomad_release | --nomad_version)
            if [ -z "$2" ]; then echo "Missing version parameter"; usage; fi
            NOMAD_VERSION="$2"
            install_fn=install_from_release
            shift 2
            ;;
        --nomad_binary)
            if [ -z "$2" ]; then echo "Missing file parameter"; usage; fi
            NOMAD_UPLOADED_BINARY="$2"
            install_fn=install_from_uploaded_binary
            shift 2
            ;;
        --nomad_url)
            if [ -z "$2" ]; then echo "Missing URL parameter"; usage; fi
            NOMAD_URL="$2"
            install_fn=install_from_url
            shift 2
            ;;
        --config_profile)
            if [ -z "$2" ]; then echo "Missing profile parameter"; usage; fi
            NOMAD_PROFILE="/opt/config/${2}"
            shift 2
            ;;
        --role)
            if [ -z "$2" ]; then echo "Missing role parameter"; usage; fi
            NOMAD_ROLE="$2"
            shift 2
            ;;
        --index)
            if [ -z "$2" ]; then echo "Missing index parameter"; usage; fi
            NOMAD_INDEX="$2"
            shift 2
            ;;
        --autojoin)
            if [ -z "$2" ]; then echo "Missing autojoin parameter"; usage; fi
            CONSUL_AUTOJOIN="$2"
            shift 2
            ;;
        --nostart)
            # for initial packer builds, we don't want to start Nomad
            START=0
            shift
            ;;
        --enterprise)
            BUILD_FOLDER="builds-ent"
            shift
            ;;
        --nomad_license)
            if [ -z "$2" ]; then echo "Missing license parameter"; usage; fi
            NOMAD_LICENSE="$2"
            shift 2
            ;;
        --nomad_acls)
            ACLS=1
            shift
            ;;
        --tls)
            TLS=1
            shift
            ;;
        *) usage ;;
    esac
done

# call the appropriate installation function
if [ -n "$install_fn" ]; then
    $install_fn
fi
if [ -n "$NOMAD_PROFILE" ]; then
    install_config_profile
fi

if [ -n "$CONSUL_AUTOJOIN" ]; then
    update_consul_autojoin
fi

sudo touch /etc/nomad.d/.environment
if [ -n "$NOMAD_LICENSE" ]; then
    echo "NOMAD_LICENSE=${NOMAD_LICENSE}" > /tmp/.nomad-environment
    sudo mv /tmp/.nomad-environment /etc/nomad.d/.environment
fi

if [ $START == "1" ]; then
    if [ "$NOMAD_ROLE" == "server" ]; then
        sudo systemctl restart vault
    fi
    sudo systemctl restart consul
    sudo systemctl restart nomad
fi
@@ -4,11 +4,6 @@

set -e

# Will be overwritten at test time with the version specified
NOMADVERSION=0.12.7
CONSULVERSION=1.9.4+ent
VAULTVERSION=1.5.4

NOMAD_PLUGIN_DIR=/opt/nomad/plugins/

mkdir_for_root() {
@@ -20,9 +15,6 @@ mkdir_for_root() {
export DEBIAN_FRONTEND=noninteractive
echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections

sudo mkdir -p /ops/shared
sudo chown -R ubuntu:ubuntu /ops/shared

mkdir_for_root /opt
mkdir_for_root /srv/data # for host volumes

@@ -43,44 +35,31 @@ sudo chown root:root /usr/local/bin/sockaddr
# Disable the firewall
sudo ufw disable || echo "ufw not installed"

echo "Install Consul"
curl -fsL -o /tmp/consul.zip \
    "https://releases.hashicorp.com/consul/${CONSULVERSION}/consul_${CONSULVERSION}_linux_amd64.zip"
sudo unzip -q /tmp/consul.zip -d /usr/local/bin
sudo chmod 0755 /usr/local/bin/consul
sudo chown root:root /usr/local/bin/consul
echo "Install HashiCorp apt repositories"
curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add -
sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
sudo apt-get update

echo "Install Consul and Nomad"
sudo apt-get install -y \
    consul-enterprise \
    nomad

# Note: neither service will start on boot because we haven't enabled
# the systemd unit file and we haven't uploaded any configuration
# files for Consul and Nomad

echo "Configure Consul"
mkdir_for_root /etc/consul.d
mkdir_for_root /opt/consul
sudo mv /tmp/linux/consul.service /etc/systemd/system/consul.service

echo "Install Vault"
curl -fsL -o /tmp/vault.zip \
    "https://releases.hashicorp.com/vault/${VAULTVERSION}/vault_${VAULTVERSION}_linux_amd64.zip"
sudo unzip -q /tmp/vault.zip -d /usr/local/bin
sudo chmod 0755 /usr/local/bin/vault
sudo chown root:root /usr/local/bin/vault

echo "Configure Vault"
mkdir_for_root /etc/vault.d
mkdir_for_root /opt/vault
sudo mv /tmp/linux/vault.service /etc/systemd/system/vault.service

sudo setcap cap_ipc_lock=+ep /usr/local/bin/vault
sudo useradd --system --home /etc/vault.d --shell /bin/false vault

echo "Configure Nomad"
mkdir_for_root /etc/nomad.d
mkdir_for_root /opt/nomad
mkdir_for_root $NOMAD_PLUGIN_DIR
sudo mv /tmp/linux/nomad.service /etc/systemd/system/nomad.service

echo "Install Nomad"
sudo mv /tmp/linux/provision.sh /opt/provision.sh
sudo chmod +x /opt/provision.sh
/opt/provision.sh --nomad_version $NOMADVERSION --nostart

echo "Installing third-party apt repositories"

# Docker

||||
@@ -1,33 +0,0 @@
[Unit]
Description="HashiCorp Vault - A tool for managing secrets"
Documentation=https://www.vaultproject.io/docs/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=/etc/vault.d/vault.hcl
StartLimitIntervalSec=60
StartLimitBurst=3

[Service]
User=vault
Group=vault
ProtectSystem=full
ProtectHome=read-only
PrivateTmp=yes
PrivateDevices=yes
SecureBits=keep-caps
AmbientCapabilities=CAP_IPC_LOCK
Capabilities=CAP_IPC_LOCK+ep
CapabilityBoundingSet=CAP_SYSLOG CAP_IPC_LOCK
NoNewPrivileges=yes
ExecStart=/usr/local/bin/vault server -config=/etc/vault.d/vault.hcl
ExecReload=/bin/kill --signal HUP $MAINPID
KillMode=process
KillSignal=SIGINT
Restart=on-failure
RestartSec=5
TimeoutStopSec=30
LimitNOFILE=65536
LimitMEMLOCK=infinity

[Install]
WantedBy=multi-user.target
@@ -5,7 +5,7 @@ variable "build_sha" {

locals {
  timestamp = regex_replace(timestamp(), "[- TZ:]", "")
  version   = "v2"
  version   = "v3"
}

source "amazon-ebs" "latest_windows_2016" {
@@ -42,19 +42,11 @@ build {
      "windows-2016-amd64/fix-tls.ps1",
      "windows-2016-amd64/install-nuget.ps1",
      "windows-2016-amd64/install-docker.ps1",
      "windows-2016-amd64/install-consul.ps1"
      "windows-2016-amd64/install-consul.ps1",
      "windows-2016-amd64/install-nomad.ps1"
    ]
  }

  provisioner "file" {
    destination = "/opt/provision.ps1"
    source      = "./windows-2016-amd64/provision.ps1"
  }

  provisioner "powershell" {
    inline = ["/opt/provision.ps1 -nomad_version 0.12.7 -nostart"]
  }

  # this restart is required for adding the "containers feature", but we can
  # wait to do it until right before we do sysprep, which makes debugging
  # builds slightly faster

@@ -8,11 +8,11 @@ Set-Location C:\opt

Try {
    $releases = "https://releases.hashicorp.com"
    $version = "1.9.4+ent"
    $version = "1.11.4+ent"
    $url = "${releases}/consul/${version}/consul_${version}_windows_amd64.zip"

    New-Item -ItemType Directory -Force -Path C:\opt\consul
    New-Item -ItemType Directory -Force -Path C:\opt\consul.d
    New-Item -ItemType Directory -Force -Path C:\etc\consul.d

    # TODO: check sha!
    Write-Output "Downloading Consul from: $url"
@@ -22,6 +22,12 @@ Try {
    C:\opt\consul.exe version
    rm consul.zip

    New-Service `
        -Name "Consul" `
        -BinaryPathName "C:\opt\consul.exe agent -config-dir C:\etc\consul.d" `
        -StartupType "Automatic" `
        -ErrorAction Ignore

} Catch {
    Write-Output "Failed to install Consul."
    Write-Output $_

46 e2e/terraform/packer/windows-2016-amd64/install-nomad.ps1 Executable file
@@ -0,0 +1,46 @@
Set-StrictMode -Version latest
$ErrorActionPreference = "Stop"

# Force TLS1.2
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12

Set-Location C:\opt

Try {
    $releases = "https://releases.hashicorp.com"
    $version = "1.2.6"
    $url = "${releases}/nomad/${version}/nomad_${version}_windows_amd64.zip"

    New-Item -ItemType Directory -Force -Path C:\opt\nomad
    New-Item -ItemType Directory -Force -Path C:\etc\nomad.d

    # TODO: check sha!
    Write-Output "Downloading Nomad from: $url"
    Invoke-WebRequest -Uri $url -Outfile nomad.zip -ErrorAction Stop
    Expand-Archive .\nomad.zip .\ -ErrorAction Stop
    Move-Item nomad.exe C:\opt\nomad.exe -Force -ErrorAction Stop
    C:\opt\nomad.exe version
    rm nomad.zip

    New-NetFirewallRule `
        -DisplayName 'Nomad HTTP Inbound' `
        -Profile @('Public', 'Domain', 'Private') `
        -Direction Inbound `
        -Action Allow `
        -Protocol TCP `
        -LocalPort @('4646')

    New-Service `
        -Name "Nomad" `
        -BinaryPathName "C:\opt\nomad.exe agent -config C:\etc\nomad.d" `
        -StartupType "Automatic" `
        -ErrorAction Ignore

} Catch {
    Write-Output "Failed to install Nomad."
    Write-Output $_
    $host.SetShouldExit(-1)
    throw
}

Write-Output "Installed Nomad."
@@ -1,263 +0,0 @@
param(
    [string]$nomad_sha,
    [string]$nomad_version,
    [string]$nomad_binary,
    [string]$nomad_url,
    [switch]$enterprise = $false,
    [switch]$nomad_acls = $false,
    [string]$config_profile,
    [string]$role,
    [string]$index,
    [string]$autojoin,
    [switch]$nostart = $false
)

Set-StrictMode -Version latest
$ErrorActionPreference = "Stop"

$usage = @"
Usage: provision.ps1 [options...]
Options (use one of the following):
 -nomad_sha SHA          full git sha to install from S3
 -nomad_version VERSION  release version number (ex. 0.12.4+ent)
 -nomad_binary FILEPATH  path to file on host
 -nomad_url URL          url path to nomad binary archive

Options for configuration:
 -config_profile FILEPATH  path to config profile directory
 -role ROLE                role within config profile directory
 -index INDEX              count of instance, for profiles with per-instance config
 -nostart                  do not start or restart Nomad
 -enterprise               if nomad_sha is passed, use the ENT version
 -autojoin                 the AWS ConsulAutoJoin tag value

"@

$RunningAsAdmin = ([Security.Principal.WindowsPrincipal] [Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole([Security.Principal.WindowsBuiltInRole] "Administrator")
if (!$RunningAsAdmin) {
    Write-Error "Must be executed in Administrator level shell."
    exit 1
}

# Force TLS1.2
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12


$install_path = "C:\opt\nomad.exe"
$platform = "windows_amd64"

Set-Location C:\opt

function Usage {
    Write-Output "${usage}"
}

function InstallFromS3 {
    Stop-Service -Name nomad -ErrorAction Ignore

    $build_folder = "builds-oss"
    if ($enterprise) {
        $build_folder = "builds-ent"
    }
    $key = "${build_folder}/nomad_${platform}_${nomad_sha}.zip"

    Write-Output "Downloading Nomad from s3: $key"
    Try {
        Remove-Item -Path ./nomad.zip -Force -ErrorAction Ignore
        Read-S3Object -BucketName nomad-team-dev-test-binaries `
            -Key $key -File ./nomad.zip -ErrorAction Stop

        Remove-Item -Path $install_path -Force -ErrorAction Stop
        Expand-Archive ./nomad.zip ./ -Force -ErrorAction Stop
        Remove-Item -Path nomad.zip -Force -ErrorAction Ignore

        New-Item -ItemType Directory -Force -Path C:\opt\nomad.d -ErrorAction Stop
        New-Item -ItemType Directory -Force -Path C:\opt\nomad -ErrorAction Stop
    } Catch {
        Write-Output "Failed to install Nomad."
        Write-Output $_
        Write-Host $_.ScriptStackTrace
        $host.SetShouldExit(-1)
        throw
    }

    Write-Output "Installed Nomad."
}

function InstallFromUploadedBinary {

    Stop-Service -Name nomad -ErrorAction Ignore

    Try {
        Remove-Item -Path $install_path -Force -ErrorAction Ignore
        Move-Item -Path $nomad_binary -Destination $install_path -Force -ErrorAction Stop

        New-Item -ItemType Directory -Force -Path C:\opt\nomad.d -ErrorAction Stop
        New-Item -ItemType Directory -Force -Path C:\opt\nomad -ErrorAction Stop
    } Catch {
        Write-Output "Failed to install Nomad."
        Write-Output $_
        $host.SetShouldExit(-1)
        throw
    }

    Write-Output "Installed Nomad."
}

function InstallFromRelease {
    Try {
        # check that we don't already have this version
        if (C:\opt\nomad.exe -version `
            | Select-String -Pattern $nomad_version -SimpleMatch -Quiet) {
            if (C:\opt\nomad.exe -version `
                | Select-String -Pattern dev -SimpleMatch -Quiet -NotMatch) {
                Write-Output "${nomad_version} already installed"
                return
            }
        }
    } Catch {
        Write-Output "${nomad_version} not previously installed"
    }

    Stop-Service -Name nomad -ErrorAction Ignore

    $releases = "https://releases.hashicorp.com"
    $url = "${releases}/nomad/${nomad_version}/nomad_${nomad_version}_${platform}.zip"

    Write-Output "Downloading Nomad from: $url"
    Try {
        Remove-Item -Path ./nomad.zip -Force -ErrorAction Ignore
        Invoke-WebRequest -Uri $url -Outfile nomad.zip -ErrorAction Stop

        Remove-Item -Path $install_path -Force -ErrorAction Ignore
        Expand-Archive .\nomad.zip .\ -ErrorAction Stop
        Remove-Item -Path nomad.zip -Force -ErrorAction Ignore

        New-Item -ItemType Directory -Force -Path C:\opt\nomad.d -ErrorAction Stop
        New-Item -ItemType Directory -Force -Path C:\opt\nomad -ErrorAction Stop
    } Catch {
        Write-Output "Failed to install Nomad."
        Write-Output $_
        $host.SetShouldExit(-1)
        throw
    }

    Write-Output "Installed Nomad."
}

function InstallFromURL {
    Stop-Service -Name nomad -ErrorAction Ignore

    Write-Output "Downloading Nomad from: $nomad_url"
    Try {
        Remove-Item -Path ./nomad.zip -Force -ErrorAction Ignore
        Invoke-WebRequest -Uri $nomad_url -Outfile nomad.zip -ErrorAction Stop

        Remove-Item -Path $install_path -Force -ErrorAction Ignore
        Expand-Archive .\nomad.zip .\ -ErrorAction Stop
        Remove-Item -Path nomad.zip -Force -ErrorAction Ignore

        New-Item -ItemType Directory -Force -Path C:\opt\nomad.d -ErrorAction Stop
        New-Item -ItemType Directory -Force -Path C:\opt\nomad -ErrorAction Stop
    } Catch {
        Write-Output "Failed to install Nomad."
        Write-Output $_
        $host.SetShouldExit(-1)
        throw
    }

    Write-Output "Installed Nomad."
}


function ConfigFiles($src, $dest) {
    Get-ChildItem -Path "$src" -Name -Attributes !Directory -ErrorAction Ignore `
        | ForEach-Object { `
            New-Item -ItemType SymbolicLink -Path "${dest}\$_" -Target "${src}\$_" }
}

function InstallConfigProfile {

    if ( Test-Path -Path 'C:\tmp\custom' -PathType Container ) {
        Remove-Item 'C:\opt\config\custom' -Force -ErrorAction Ignore
        Move-Item -Path 'C:\tmp\custom' -Destination 'C:\opt\config\custom' -Force
    }

    $cfg = "C:\opt\config\${config_profile}"

    Remove-Item "C:\opt\nomad.d\*" -Force -ErrorAction Ignore
    Remove-Item "C:\opt\consul.d\*" -Force -ErrorAction Ignore

    ConfigFiles "${cfg}\nomad" "C:\opt\nomad.d"
    ConfigFiles "${cfg}\consul" "C:\opt\consul.d"

    if ( "" -ne $role ) {
        ConfigFiles "${cfg}\nomad\${role}" "C:\opt\nomad.d"
        ConfigFiles "${cfg}\consul\${role}" "C:\opt\consul.d"
    }

    if ( "" -ne $index ) {
        ConfigFiles "${cfg}\nomad\${role}\indexed\*${index}*" "C:\opt\nomad.d"
        ConfigFiles "${cfg}\consul\${role}\indexed\*${index}*" "C:\opt\consul.d"
    }
}

function UpdateConsulAutojoin {
    (Get-Content C:\opt\consul.d\aws.json).replace("tag_key=ConsulAutoJoin tag_value=auto-join", "tag_key=ConsulAutoJoin tag_value=${autojoin}") | `
        Set-Content C:\opt\consul.d\aws.json
}

function CreateConsulService {
    New-Service `
        -Name "Consul" `
        -BinaryPathName "C:\opt\consul.exe agent -config-dir C:\opt\consul.d" `
        -StartupType "Automatic" `
        -ErrorAction Ignore
}

function CreateNomadService {
    New-NetFirewallRule `
        -DisplayName 'Nomad HTTP Inbound' `
        -Profile @('Public', 'Domain', 'Private') `
        -Direction Inbound `
        -Action Allow `
        -Protocol TCP `
        -LocalPort @('4646')

    # idempotently enable as a service
    New-Service `
        -Name "Nomad" `
        -BinaryPathName "C:\opt\nomad.exe agent -config C:\opt\nomad.d" `
        -StartupType "Automatic" `
        -ErrorAction Ignore
}

if ( "" -ne $nomad_sha ) {
    InstallFromS3
    CreateNomadService
}
if ( "" -ne $nomad_version ) {
    InstallFromRelease
    CreateNomadService
}
if ( "" -ne $nomad_binary ) {
    InstallFromUploadedBinary
    CreateNomadService
}
if ( "" -ne $nomad_url ) {
    InstallFromURL
    CreateNomadService
}
if ( "" -ne $config_profile) {
    InstallConfigProfile
}
if ( "" -ne $autojoin) {
    UpdateConsulAutojoin
}

if (!($nostart)) {
    CreateConsulService
    CreateNomadService
    Restart-Service "Consul"
    Restart-Service "Nomad"
}
@@ -95,11 +95,16 @@ Try {
    New-NetFirewallRule -Name sshd -DisplayName 'OpenSSH Server (sshd)' `
        -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 22 -ErrorAction Stop

    # Note: there appears to be a regression in recent versions of
    # Terraform for file provisioning over ssh for Windows with
    # powershell as the default shell
    # See: https://github.com/hashicorp/terraform/issues/30661
    #
    # Set powershell as the OpenSSH login shell
    New-ItemProperty -Path "HKLM:\SOFTWARE\OpenSSH" `
        -Name DefaultShell `
        -Value "C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe" `
        -PropertyType String -Force -ErrorAction Stop
    # New-ItemProperty -Path "HKLM:\SOFTWARE\OpenSSH" `
    #     -Name DefaultShell `
    #     -Value "C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe" `
    #     -PropertyType String -Force -ErrorAction Stop

    Write-Output "Installed OpenSSH."

129 e2e/terraform/provision-nomad/install-linux.tf Normal file
@@ -0,0 +1,129 @@
resource "local_file" "nomad_systemd_unit_file" {
  sensitive_content = templatefile("etc/nomad.d/nomad-${var.role}.service", {})
  filename          = "${local.upload_dir}/nomad.d/nomad.service"
  file_permission   = "0700"
}

resource "null_resource" "install_nomad_binary_linux" {
  count    = var.platform == "linux" ? 1 : 0
  triggers = { nomad_binary_sha = filemd5(var.nomad_local_binary) }

  connection {
    type        = "ssh"
    user        = var.connection.user
    host        = var.instance.public_ip
    port        = var.connection.port
    private_key = file(var.connection.private_key)
    timeout     = "5m"
  }

  provisioner "file" {
    source      = var.nomad_local_binary
    destination = "/tmp/nomad"
  }
  provisioner "remote-exec" {
    inline = [
      "sudo mv /tmp/nomad /usr/local/bin/nomad",
      "sudo chmod +x /usr/local/bin/nomad",
    ]
  }
}

resource "null_resource" "install_consul_configs_linux" {
  count = var.platform == "linux" ? 1 : 0

  depends_on = [
    null_resource.upload_consul_configs,
  ]

  connection {
    type        = "ssh"
    user        = var.connection.user
    host        = var.instance.public_ip
    port        = var.connection.port
    private_key = file(var.connection.private_key)
    timeout     = "5m"
  }

  provisioner "remote-exec" {
    inline = [
      "mkdir -p /etc/consul.d",
      "sudo rm -rf /etc/consul.d/*",
      "sudo mv /tmp/consul_ca.pem /etc/consul.d/ca.pem",
      "sudo mv /tmp/consul_client_acl.json /etc/consul.d/acl.json",
      "sudo mv /tmp/consul_client.json /etc/consul.d/consul_client.json",
      "sudo mv /tmp/consul_client_base.json /etc/consul.d/consul_client_base.json",
      "sudo mv /tmp/consul.service /etc/systemd/system/consul.service",
    ]
  }
}

resource "null_resource" "install_nomad_configs_linux" {
  count = var.platform == "linux" ? 1 : 0

  depends_on = [
    null_resource.upload_nomad_configs,
  ]

  connection {
    type        = "ssh"
    user        = var.connection.user
    host        = var.instance.public_ip
    port        = var.connection.port
    private_key = file(var.connection.private_key)
    timeout     = "5m"
  }

  provisioner "remote-exec" {
    inline = [
      "mkdir -p /etc/nomad.d",
      "mkdir -p /opt/nomad/data",
      "sudo rm -rf /etc/nomad.d/*",
      "sudo mv /tmp/consul.hcl /etc/nomad.d/consul.hcl",
      "sudo mv /tmp/vault.hcl /etc/nomad.d/vault.hcl",
      "sudo mv /tmp/base.hcl /etc/nomad.d/base.hcl",
      "sudo mv /tmp/${var.role}-${var.platform}.hcl /etc/nomad.d/${var.role}-${var.platform}.hcl",
      "sudo mv /tmp/${var.role}-${var.platform}-${var.index}.hcl /etc/nomad.d/${var.role}-${var.platform}-${var.index}.hcl",
      "sudo mv /tmp/.environment /etc/nomad.d/.environment",

      # TLS
      "sudo mkdir /etc/nomad.d/tls",
      "sudo mv /tmp/tls.hcl /etc/nomad.d/tls.hcl",
      "sudo mv /tmp/agent-${var.instance.public_ip}.key /etc/nomad.d/tls/agent.key",
      "sudo mv /tmp/agent-${var.instance.public_ip}.crt /etc/nomad.d/tls/agent.crt",
      "sudo mv /tmp/ca.crt /etc/nomad.d/tls/ca.crt",

      "sudo mv /tmp/nomad.service /etc/systemd/system/nomad.service",
    ]
  }

}

resource "null_resource" "restart_linux_services" {
  count = var.platform == "linux" ? 1 : 0

  depends_on = [
    null_resource.install_nomad_binary_linux,
    null_resource.install_consul_configs_linux,
    null_resource.install_nomad_configs_linux,
  ]

  connection {
    type        = "ssh"
    user        = var.connection.user
    host        = var.instance.public_ip
    port        = var.connection.port
    private_key = file(var.connection.private_key)
    timeout     = "5m"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo systemctl daemon-reload",
      "sudo systemctl enable consul",
      "sudo systemctl restart consul",
      "sudo systemctl enable nomad",
      "sudo systemctl restart nomad",
    ]
  }
}
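Each of these resources consumes the same var.connection object. The module's declaration for it isn't visible in this diff, but a shape consistent with the usage above would be (a sketch; the attribute names are inferred from usage):

variable "connection" {
  type = object({
    user        = string
    port        = number
    private_key = string # local path, read with file() before dialing
  })
  description = "SSH connection parameters for the instance"
}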
123 e2e/terraform/provision-nomad/install-windows.tf Normal file
@@ -0,0 +1,123 @@
resource "null_resource" "install_nomad_binary_windows" {
  count    = var.platform == "windows" ? 1 : 0
  triggers = { nomad_binary_sha = filemd5(var.nomad_local_binary) }

  connection {
    type            = "ssh"
    user            = var.connection.user
    host            = var.instance.public_ip
    port            = var.connection.port
    private_key     = file(var.connection.private_key)
    target_platform = "windows"
    timeout         = "10m"
  }

  provisioner "file" {
    source      = var.nomad_local_binary
    destination = "/tmp/nomad"
  }
  provisioner "remote-exec" {
    inline = [
      "powershell Move-Item -Force -Path C://tmp/nomad -Destination C:/opt/nomad.exe",
    ]
  }
}

resource "null_resource" "install_consul_configs_windows" {
  count = var.platform == "windows" ? 1 : 0

  depends_on = [
    null_resource.upload_consul_configs,
  ]

  connection {
    type            = "ssh"
    user            = var.connection.user
    host            = var.instance.public_ip
    port            = var.connection.port
    private_key     = file(var.connection.private_key)
    target_platform = "windows"
    timeout         = "10m"
  }

  provisioner "remote-exec" {
    inline = [
      "powershell Remove-Item -Force -Recurse -Path C://etc/consul.d",
      "powershell New-Item -Force -Path C:// -Name opt -ItemType directory",
      "powershell New-Item -Force -Path C://etc -Name consul.d -ItemType directory",
      "powershell Move-Item -Force -Path C://tmp/consul_ca.pem C://Windows/System32/ca.pem",
      "powershell Move-Item -Force -Path C://tmp/consul_client_acl.json C://etc/consul.d/acl.json",
      "powershell Move-Item -Force -Path C://tmp/consul_client.json C://etc/consul.d/consul_client.json",
      "powershell Move-Item -Force -Path C://tmp/consul_client_base.json C://etc/consul.d/consul_client_base.json",
    ]
  }
}

resource "null_resource" "install_nomad_configs_windows" {
  count = var.platform == "windows" ? 1 : 0

  depends_on = [
    null_resource.upload_nomad_configs,
  ]

  connection {
    type            = "ssh"
    user            = var.connection.user
    host            = var.instance.public_ip
    port            = var.connection.port
    private_key     = file(var.connection.private_key)
    target_platform = "windows"
    timeout         = "10m"
  }

  provisioner "remote-exec" {
    inline = [
      "powershell Remove-Item -Force -Recurse -Path C://etc/nomad.d",
      "powershell New-Item -Force -Path C:// -Name opt -ItemType directory",
      "powershell New-Item -Force -Path C:// -Name etc -ItemType directory",
      "powershell New-Item -Force -Path C://etc/ -Name nomad.d -ItemType directory",
      "powershell New-Item -Force -Path C://opt/ -Name nomad -ItemType directory",
      "powershell New-Item -Force -Path C://opt/nomad -Name data -ItemType directory",
      "powershell Move-Item -Force -Path C://tmp/consul.hcl C://etc/nomad.d/consul.hcl",
      "powershell Move-Item -Force -Path C://tmp/vault.hcl C://etc/nomad.d/vault.hcl",
      "powershell Move-Item -Force -Path C://tmp/base.hcl C://etc/nomad.d/base.hcl",
      "powershell Move-Item -Force -Path C://tmp/${var.role}-${var.platform}.hcl C://etc/nomad.d/${var.role}-${var.platform}.hcl",
      "powershell Move-Item -Force -Path C://tmp/${var.role}-${var.platform}-${var.index}.hcl C://etc/nomad.d/${var.role}-${var.platform}-${var.index}.hcl",
      "powershell Move-Item -Force -Path C://tmp/.environment C://etc/nomad.d/.environment",

      # TLS
      "powershell New-Item -Force -Path C://etc/nomad.d -Name tls -ItemType directory",
      "powershell Move-Item -Force -Path C://tmp/tls.hcl C://etc/nomad.d/tls.hcl",
      "powershell Move-Item -Force -Path C://tmp/agent-${var.instance.public_ip}.key C://etc/nomad.d/tls/agent.key",
      "powershell Move-Item -Force -Path C://tmp/agent-${var.instance.public_ip}.crt C://etc/nomad.d/tls/agent.crt",
      "powershell Move-Item -Force -Path C://tmp/ca.crt C://etc/nomad.d/tls/ca.crt",
    ]
  }
}

resource "null_resource" "restart_windows_services" {
  count = var.platform == "windows" ? 1 : 0

  depends_on = [
    null_resource.install_nomad_binary_windows,
    null_resource.install_consul_configs_windows,
    null_resource.install_nomad_configs_windows,
  ]

  connection {
    type            = "ssh"
    user            = var.connection.user
    host            = var.instance.public_ip
    port            = var.connection.port
    private_key     = file(var.connection.private_key)
    target_platform = "windows"
    timeout         = "10m"
  }

  provisioner "remote-exec" {
    inline = [
      "powershell Restart-Service Consul",
      "powershell Restart-Service Nomad"
    ]
  }
}
@@ -1,33 +1,46 @@
locals {
  provision_script = var.platform == "windows_amd64" ? "powershell C:/opt/provision.ps1" : "/opt/provision.sh"
  upload_dir       = "uploads/${var.instance.public_ip}"

  config_path         = dirname("${path.root}/config/")
  indexed_config_path = fileexists("etc/nomad.d/${var.role}-${var.platform}-${var.index}.hcl") ? "etc/nomad.d/${var.role}-${var.platform}-${var.index}.hcl" : "etc/nomad.d/index.hcl"

  config_files = compact(setunion(
    fileset(local.config_path, "**"),
  ))

  update_config_command = var.platform == "windows_amd64" ? "powershell -Command \"& { if (test-path /opt/config) { Remove-Item -Path /opt/config -Force -Recurse }; cp -r C:/tmp/config /opt/config }\"" : "sudo rm -rf /opt/config; sudo mv /tmp/config /opt/config"

  # abstract-away platform-specific parameter expectations
  _arg = var.platform == "windows_amd64" ? "-" : "--"

  tls_role = var.role == "server" ? "server" : "client"
}

resource "null_resource" "provision_nomad" {
# if nomad_license is unset, it'll be a harmless empty license file
resource "local_file" "nomad_environment" {
  sensitive_content = templatefile("etc/nomad.d/.environment", {
    license = var.nomad_license
  })
  filename        = "${local.upload_dir}/nomad.d/.environment"
  file_permission = "0600"
}

  depends_on = [
    null_resource.upload_configs,
    null_resource.upload_nomad_binary,
    null_resource.generate_instance_tls_certs
  ]
resource "local_file" "nomad_base_config" {
  sensitive_content = templatefile("etc/nomad.d/base.hcl", {
    data_dir = var.platform != "windows" ? "/opt/nomad/data" : "C://opt/nomad/data"
  })
  filename        = "${local.upload_dir}/nomad.d/base.hcl"
  file_permission = "0600"
}
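Only data_dir is interpolated in this templatefile call, so a minimal etc/nomad.d/base.hcl template consistent with it would be little more than the following (an assumption; the template file itself is not included in this diff), with any remaining base agent settings kept static in the template:

data_dir = "${data_dir}"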

  # no need to re-run if nothing changes
  triggers = {
    script = data.template_file.provision_script.rendered
  }
resource "local_file" "nomad_role_config" {
  sensitive_content = templatefile("etc/nomad.d/${var.role}-${var.platform}.hcl", {})
  filename          = "${local.upload_dir}/nomad.d/${var.role}.hcl"
  file_permission   = "0600"
}

resource "local_file" "nomad_indexed_config" {
  sensitive_content = templatefile(local.indexed_config_path, {})
  filename          = "${local.upload_dir}/nomad.d/${var.role}-${var.platform}-${var.index}.hcl"
  file_permission   = "0600"
}

resource "local_file" "nomad_tls_config" {
  sensitive_content = templatefile("etc/nomad.d/tls.hcl", {})
  filename          = "${local.upload_dir}/nomad.d/tls.hcl"
  file_permission   = "0600"
}

resource "null_resource" "upload_consul_configs" {

  connection {
    type = "ssh"
@@ -35,75 +48,33 @@ resource "null_resource" "provision_nomad" {
    host            = var.instance.public_ip
    port            = var.connection.port
    private_key     = file(var.connection.private_key)
    target_platform = var.platform == "windows_amd64" ? "windows" : "unix"
    target_platform = var.arch == "windows_amd64" ? "windows" : "unix"
    timeout         = "15m"
  }

  provisioner "remote-exec" {
    inline = [data.template_file.provision_script.rendered]
  provisioner "file" {
    source      = "uploads/shared/consul.d/ca.pem"
    destination = "/tmp/consul_ca.pem"
  }

}

data "template_file" "provision_script" {
  template = "${local.provision_script}${data.template_file.arg_nomad_url.rendered}${data.template_file.arg_nomad_sha.rendered}${data.template_file.arg_nomad_version.rendered}${data.template_file.arg_nomad_binary.rendered}${data.template_file.arg_nomad_enterprise.rendered}${data.template_file.arg_nomad_license.rendered}${data.template_file.arg_nomad_acls.rendered}${data.template_file.arg_nomad_tls.rendered}${data.template_file.arg_profile.rendered}${data.template_file.arg_role.rendered}${data.template_file.arg_index.rendered}${data.template_file.autojoin_tag.rendered}"
}

data "template_file" "arg_nomad_sha" {
  template = var.nomad_sha != "" && var.nomad_local_binary == "" && var.nomad_url == "" ? " ${local._arg}nomad_sha ${var.nomad_sha}" : ""
}

data "template_file" "arg_nomad_version" {
  template = var.nomad_version != "" && var.nomad_sha == "" && var.nomad_url == "" && var.nomad_local_binary == "" ? " ${local._arg}nomad_version ${var.nomad_version}" : ""
}

data "template_file" "arg_nomad_url" {
  template = var.nomad_url != "" && var.nomad_local_binary == "" ? " ${local._arg}nomad_url '${var.nomad_url}'" : ""
}

data "template_file" "arg_nomad_binary" {
  template = var.nomad_local_binary != "" ? " ${local._arg}nomad_binary /tmp/nomad" : ""
}

data "template_file" "arg_nomad_enterprise" {
  template = var.nomad_enterprise ? " ${local._arg}enterprise" : ""
}

data "template_file" "arg_nomad_license" {
  template = var.nomad_license != "" ? " ${local._arg}nomad_license ${var.nomad_license}" : ""
}

data "template_file" "arg_nomad_acls" {
  template = var.nomad_acls ? " ${local._arg}nomad_acls" : ""
}

data "template_file" "arg_nomad_tls" {
  template = var.tls ? " ${local._arg}tls" : ""
}

data "template_file" "arg_profile" {
  template = var.profile != "" ? " ${local._arg}config_profile ${var.profile}" : ""
}

data "template_file" "arg_role" {
  template = var.role != "" ? " ${local._arg}role ${var.role}" : ""
}

data "template_file" "arg_index" {
  template = var.index != "" ? " ${local._arg}index ${var.index}" : ""
}

data "template_file" "autojoin_tag" {
  template = var.cluster_name != "" ? " ${local._arg}autojoin auto-join-${var.cluster_name}" : ""
}

resource "null_resource" "upload_nomad_binary" {

  count      = var.nomad_local_binary != "" ? 1 : 0
  depends_on = [null_resource.upload_configs]
  triggers = {
    nomad_binary_sha = filemd5(var.nomad_local_binary)
  provisioner "file" {
    source      = "uploads/shared/consul.d/consul_client.json"
    destination = "/tmp/consul_client.json"
  }
  provisioner "file" {
    source      = "uploads/shared/consul.d/client_acl.json"
    destination = "/tmp/consul_client_acl.json"
  }
  provisioner "file" {
    source      = "uploads/shared/consul.d/consul_client_base.json"
    destination = "/tmp/consul_client_base.json"
  }
  provisioner "file" {
    source      = "uploads/shared/consul.d/consul.service"
    destination = "/tmp/consul.service"
  }
}

resource "null_resource" "upload_nomad_configs" {

  connection {
    type = "ssh"
@@ -111,134 +82,55 @@ resource "null_resource" "upload_nomad_binary" {
    host            = var.instance.public_ip
    port            = var.connection.port
    private_key     = file(var.connection.private_key)
    target_platform = var.platform == "windows_amd64" ? "windows" : "unix"
    target_platform = var.arch == "windows_amd64" ? "windows" : "unix"
    timeout         = "15m"
  }

  # created in hcp_consul.tf
  provisioner "file" {
    source      = var.nomad_local_binary
    destination = "/tmp/nomad"
    source      = "uploads/shared/nomad.d/${var.role}-consul.hcl"
    destination = "/tmp/consul.hcl"
  }
  # created in hcp_vault.tf
  provisioner "file" {
    source      = "uploads/shared/nomad.d/vault.hcl"
    destination = "/tmp/vault.hcl"
  }

  provisioner "file" {
    source      = local_file.nomad_environment.filename
    destination = "/tmp/.environment"
  }
  provisioner "file" {
    source      = local_file.nomad_base_config.filename
    destination = "/tmp/base.hcl"
  }
  provisioner "file" {
    source      = local_file.nomad_role_config.filename
    destination = "/tmp/${var.role}-${var.platform}.hcl"
  }
  provisioner "file" {
    source      = local_file.nomad_indexed_config.filename
    destination = "/tmp/${var.role}-${var.platform}-${var.index}.hcl"
  }
  provisioner "file" {
    source      = local_file.nomad_tls_config.filename
    destination = "/tmp/tls.hcl"
  }
  provisioner "file" {
    source      = local_file.nomad_systemd_unit_file.filename
    destination = "/tmp/nomad.service"
  }
  provisioner "file" {
    source      = local_file.nomad_client_key.filename
    destination = "/tmp/agent-${var.instance.public_ip}.key"
  }
  provisioner "file" {
    source      = local_file.nomad_client_cert.filename
    destination = "/tmp/agent-${var.instance.public_ip}.crt"
  }
  provisioner "file" {
    source      = "keys/tls_ca.crt"
    destination = "/tmp/ca.crt"
  }
}

resource "null_resource" "upload_configs" {

  triggers = {
    hashes = join(",", [for file in local.config_files : filemd5("${local.config_path}/${file}")])
  }

  connection {
    type            = "ssh"
    user            = var.connection.user
    host            = var.instance.public_ip
    port            = var.connection.port
    private_key     = file(var.connection.private_key)
    target_platform = var.platform == "windows_amd64" ? "windows" : "unix"
    timeout         = "15m"
  }

  provisioner "file" {
    source      = local.config_path
    destination = "/tmp/"
  }

  provisioner "remote-exec" {
    inline = [local.update_config_command]
  }

}

// TODO: Create separate certs.
// This creates one set of certs to manage Nomad, Consul, and Vault and therefore
// puts all the required SAN entries to enable sharing certs. This is an anti-pattern
// that we should clean up.
resource "null_resource" "generate_instance_tls_certs" {
  count      = var.tls ? 1 : 0
  depends_on = [null_resource.upload_configs]

  connection {
    type        = "ssh"
    user        = var.connection.user
    host        = var.instance.public_ip
    port        = var.connection.port
    private_key = file(var.connection.private_key)
    timeout     = "15m"
  }

  provisioner "local-exec" {
    command = <<EOF
set -e

cat <<'EOT' > keys/ca.crt
${var.tls_ca_cert}
EOT

cat <<'EOT' > keys/ca.key
${var.tls_ca_key}
EOT

openssl req -newkey rsa:2048 -nodes \
    -subj "/CN=${local.tls_role}.global.nomad" \
    -keyout keys/agent-${var.instance.public_ip}.key \
    -out keys/agent-${var.instance.public_ip}.csr

cat <<'NEOY' > keys/agent-${var.instance.public_ip}.conf

subjectAltName=DNS:${local.tls_role}.global.nomad,DNS:${local.tls_role}.dc1.consul,DNS:localhost,DNS:${var.instance.public_dns},DNS:vault.service.consul,DNS:active.vault.service.consul,IP:127.0.0.1,IP:${var.instance.private_ip},IP:${var.instance.public_ip}
extendedKeyUsage = serverAuth, clientAuth
basicConstraints = CA:FALSE
keyUsage = digitalSignature, keyEncipherment
NEOY

openssl x509 -req -CAcreateserial \
    -extfile ./keys/agent-${var.instance.public_ip}.conf \
    -days 365 \
    -sha256 \
    -CA keys/ca.crt \
    -CAkey keys/ca.key \
    -in keys/agent-${var.instance.public_ip}.csr \
    -out keys/agent-${var.instance.public_ip}.crt

EOF
  }

  provisioner "remote-exec" {
    inline = [
      "mkdir -p /tmp/nomad-tls",
    ]
  }
  provisioner "file" {
    source      = "keys/ca.crt"
    destination = "/tmp/nomad-tls/ca.crt"
  }
  provisioner "file" {
    source      = "keys/agent-${var.instance.public_ip}.crt"
    destination = "/tmp/nomad-tls/agent.crt"
  }
  provisioner "file" {
    source      = "keys/agent-${var.instance.public_ip}.key"
    destination = "/tmp/nomad-tls/agent.key"
  }
  # workaround to avoid updating packer
  provisioner "file" {
    source      = "packer/ubuntu-bionic-amd64/provision.sh"
    destination = "/opt/provision.sh"
  }
  provisioner "file" {
    source      = "config"
    destination = "/tmp/config"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo cp -r /tmp/nomad-tls /opt/config/${var.profile}/nomad/tls",
      "sudo cp -r /tmp/nomad-tls /opt/config/${var.profile}/consul/tls",
      "sudo cp -r /tmp/nomad-tls /opt/config/${var.profile}/vault/tls",

      # more workaround
      "sudo rm -rf /opt/config",
      "sudo mv /tmp/config /opt/config"
    ]
  }

}
42 e2e/terraform/provision-nomad/tls.tf Normal file
@@ -0,0 +1,42 @@
resource "tls_private_key" "nomad" {
  algorithm   = "ECDSA"
  ecdsa_curve = "P384"
}

resource "tls_cert_request" "nomad" {
  key_algorithm   = "ECDSA"
  private_key_pem = tls_private_key.nomad.private_key_pem
  ip_addresses    = [var.instance.public_ip, var.instance.private_ip, "127.0.0.1"]
  dns_names       = ["${var.role}.global.nomad"]

  subject {
    common_name = "${var.role}.global.nomad"
  }
}

resource "tls_locally_signed_cert" "nomad" {
  cert_request_pem   = tls_cert_request.nomad.cert_request_pem
  ca_key_algorithm   = var.tls_ca_algorithm
  ca_private_key_pem = var.tls_ca_key
  ca_cert_pem        = var.tls_ca_cert

  validity_period_hours = 720

  # Reasonable set of uses for a server SSL certificate.
  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "client_auth",
    "server_auth",
  ]
}

resource "local_file" "nomad_client_key" {
  sensitive_content = tls_private_key.nomad.private_key_pem
  filename          = "keys/agent-${var.instance.public_ip}.key"
}

resource "local_file" "nomad_client_cert" {
  sensitive_content = tls_locally_signed_cert.nomad.cert_pem
  filename          = "keys/agent-${var.instance.public_ip}.crt"
}

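The new tls.tf replaces the openssl shell pipeline with the Terraform tls provider, but it consumes CA material (`var.tls_ca_key`, `var.tls_ca_cert`) supplied by the caller. A sketch of how that CA could itself be generated with the same provider, assuming the CA is also Terraform-managed (resource names and the common name are illustrative, not from this diff):

resource "tls_private_key" "ca" {
  algorithm   = "ECDSA"
  ecdsa_curve = "P384"
}

resource "tls_self_signed_cert" "ca" {
  key_algorithm         = "ECDSA"
  private_key_pem       = tls_private_key.ca.private_key_pem
  is_ca_certificate     = true
  validity_period_hours = 720

  subject {
    common_name = "Nomad E2E CA" # illustrative
  }

  # Uses appropriate for a signing CA.
  allowed_uses = [
    "cert_signing",
    "crl_signing",
    "digital_signature",
  ]
}
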
@@ -1,55 +1,19 @@
variable "platform" {
  type        = string
  description = "Platform ID (ex. \"linux_amd64\" or \"windows_amd64\")"
  default     = "linux_amd64"
}

variable "nomad_version" {
  type        = string
  description = "Nomad release version (ex. \"0.10.3\")"
  default     = ""
}

variable "nomad_sha" {
  type        = string
  description = "Nomad build full SHA (ex. \"fef22bdbfa094b5d076710354275e360867261aa\")"
  default     = ""
}

variable "nomad_local_binary" {
  type        = string
  description = "Path to local Nomad build (ex. \"/home/me/bin/nomad\")"
  default     = ""
}

variable "nomad_url" {
  type        = string
  description = "URL to Nomad binary (ex. \"https://circleci.com/.../linux_amd64.zip\")"
  default     = ""
}

variable "nomad_enterprise" {
  type        = bool
  description = "If nomad_sha is used, deploy Nomad Enterprise"
  default     = false
}

variable "nomad_license" {
  type        = string
  description = "The enterprise license to use. Overrides the Nomad temporary license"
  default     = ""
}

variable "nomad_acls" {
  type        = bool
  description = "Bootstrap ACLs"
  default     = false
}

variable "tls" {
  type        = bool
  description = "Bootstrap TLS"
  default     = false
}

variable "tls_ca_algorithm" {
  type        = string
  description = "CA private key algorithm"
  default     = "ECDSA"
}

variable "tls_ca_key" {
@@ -64,15 +28,21 @@ variable "tls_ca_cert" {
  default = ""
}

variable "profile" {
variable "arch" {
  type        = string
  description = "The name of the configuration profile (ex. 'full-cluster')"
  default     = ""
  description = "The architecture for this instance (ex. 'linux_amd64' or 'windows_amd64')"
  default     = "linux_amd64"
}

variable "platform" {
  type        = string
  description = "The platform for this instance (ex. 'windows' or 'linux')"
  default     = "linux"
}

variable "role" {
  type        = string
  description = "The role in the configuration profile for this instance (ex. 'client-linux')"
  description = "The role for this instance (ex. 'client' or 'server')"
  default     = ""
}

@@ -82,12 +52,6 @@ variable "index" {
  default = ""
}

variable "cluster_name" {
  type        = string
  description = "The random name assigned to the cluster"
  default     = ""
}

variable "instance" {
  type = object({
    id = string

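With `role` narrowed to two meaningful values, a `validation` block (Terraform 0.13+) could reject typos at plan time. This is a suggested hardening, not part of the diff:

variable "role" {
  type        = string
  description = "The role for this instance (ex. 'client' or 'server')"
  default     = ""

  # Reject anything other than the two supported roles (or unset).
  validation {
    condition     = contains(["", "client", "server"], var.role)
    error_message = "The role must be empty, 'client', or 'server'."
  }
}
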
@@ -1,40 +0,0 @@
#!/bin/bash

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# unseal vault and get a root operator token; the vault is configured to
# autounseal with AWS KMS
while true :
do
    ROOT_TOKEN=$(vault operator init -recovery-shares=1 -recovery-threshold=1 | awk '/Initial Root Token/{print $4}')
    if [ ! -z $ROOT_TOKEN ]; then break; fi
    sleep 5
done
set -e

export VAULT_TOKEN="$ROOT_TOKEN"

mkdir -p ../keys
echo $VAULT_TOKEN > "${DIR}/../keys/vault_root_token"

# write policies for Nomad to Vault, and then configure Nomad to use the
# token from those policies

vault policy write nomad-server "${DIR}/vault-nomad-server-policy.hcl"
vault write /auth/token/roles/nomad-cluster "@${DIR}/vault-nomad-cluster-role.json"

NOMAD_VAULT_TOKEN=$(vault token create -policy nomad-server -period 72h -orphan | awk '/token /{print $2}')

cat <<EOF > "${DIR}/../keys/nomad_vault.hcl"
vault {
  enabled          = true
  address          = "https://active.vault.service.consul:8200"
  task_token_ttl   = "1h"
  create_from_role = "nomad-cluster"
  token            = "$NOMAD_VAULT_TOKEN"
  ca_file          = "/etc/vault.d/tls/ca.crt"
  cert_file        = "/etc/vault.d/tls/agent.crt"
  key_file         = "/etc/vault.d/tls/agent.key"
}

EOF
@@ -1,8 +0,0 @@
{
  "disallowed_policies": "nomad-server",
  "token_explicit_max_ttl": 0,
  "name": "nomad-cluster",
  "orphan": true,
  "token_period": 259200,
  "renewable": true
}
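The deleted script and role JSON above map naturally onto the Terraform Vault provider. A sketch of the declarative equivalent, assuming a configured `vault` provider and reusing the same policy file and role settings (resource names are illustrative):

resource "vault_policy" "nomad_server" {
  name   = "nomad-server"
  policy = file("${path.module}/vault-nomad-server-policy.hcl")
}

resource "vault_token_auth_backend_role" "nomad_cluster" {
  # Mirrors the fields of vault-nomad-cluster-role.json above.
  role_name           = "nomad-cluster"
  disallowed_policies = [vault_policy.nomad_server.name]
  orphan              = true
  token_period        = 259200
  renewable           = true
}
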
@@ -1,19 +0,0 @@
region                           = "us-east-1"
instance_type                    = "t3.medium"
server_count                     = "3"
client_count_ubuntu_bionic_amd64 = "4"
client_count_windows_2016_amd64  = "1"
profile                          = "full-cluster"
nomad_enterprise                 = true
nomad_acls                       = true
vault                            = true
volumes                          = true
tls                              = true

# required to avoid picking up defaults from terraform.tfvars file
nomad_version      = "" # default version for deployment
nomad_local_binary = "" # overrides nomad_version if set

# The nightly E2E runner will set a nomad_sha flag; this should not be used
# outside of the nightly E2E runner and will usually fail because the build
# will not be available
@@ -1,23 +1,12 @@
region                           = "us-east-1"
instance_type                    = "t3.medium"
server_count                     = "3"
client_count_ubuntu_bionic_amd64 = "2"
client_count_windows_2016_amd64  = "0"
profile                          = "dev-cluster"
nomad_acls                       = false
nomad_enterprise                 = false
vault                            = true
volumes                          = false
tls                              = true
client_count_ubuntu_bionic_amd64 = "4"
client_count_windows_2016_amd64  = "1"
volumes                          = true

nomad_version      = "1.0.1" # default version for deployment
nomad_local_binary = ""      # overrides nomad_version if set
nomad_url          = ""      # overrides nomad_version if set
nomad_local_binary = "../../pkg/linux_amd64/nomad"
nomad_local_binary_client_windows_2016_amd64 = ["../../pkg/windows_amd64/nomad.exe"]

# Example overrides:
# nomad_local_binary = "../../pkg/linux_amd64/nomad"
# nomad_local_binary_client_windows_2016_amd64 = ["../../pkg/windows_amd64/nomad.exe"]

# The nightly E2E runner will set a nomad_sha flag; this should not be used
# outside of the nightly E2E runner and will usually fail because the build
# will not be available
# For testing enterprise, set via --var:
# nomad_license = <content of Nomad license>
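The per-platform override lists above (and the `# ignored` comments in the fixtures that follow) imply index-based semantics: entry i applies to instance i, and anything beyond the instance count is dropped. A hedged sketch of that lookup, assuming the counts and lists are wired through as variables (the local name is illustrative, not from this diff):

locals {
  # Fall back to the default binary when no per-index override exists;
  # overrides past the client count are simply never evaluated.
  windows_client_binaries = [
    for i in range(var.client_count_windows_2016_amd64) :
    try(var.nomad_local_binary_client_windows_2016_amd64[i], var.nomad_local_binary)
  ]
}
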
@@ -1,12 +0,0 @@
{
  "provisioning": {
    "nomad_client_linux[0]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster",
    "nomad_client_linux[1]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster",
    "nomad_client_linux[2]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster",
    "nomad_client_linux[3]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster",
    "nomad_client_windows[0]": "C:/opt/provision.ps1 -nomad_version 0.12.1 -config_profile dev-cluster",
    "nomad_server[0]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster",
    "nomad_server[1]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster",
    "nomad_server[2]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster"
  }
}
@@ -1,8 +0,0 @@
# test: install a public Nomad release
profile = "dev-cluster"

server_count         = 3
client_count         = 4
windows_client_count = 1

nomad_version = "0.12.1"
@@ -1,12 +0,0 @@
{
  "provisioning": {
    "nomad_client_linux[0]": "/opt/provision.sh --nomad_version 0.12.0 --config_profile dev-cluster",
    "nomad_client_linux[1]": "/opt/provision.sh --nomad_version 0.12.3 --config_profile dev-cluster",
    "nomad_client_linux[2]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster",
    "nomad_client_linux[3]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster",
    "nomad_client_windows[0]": "C:/opt/provision.ps1 -nomad_version 0.12.3 -config_profile dev-cluster",
    "nomad_server[0]": "/opt/provision.sh --nomad_version 0.12.0 --config_profile dev-cluster",
    "nomad_server[1]": "/opt/provision.sh --nomad_version 0.12.3 --config_profile dev-cluster",
    "nomad_server[2]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster"
  }
}
@@ -1,20 +0,0 @@
# test: install a public Nomad release with overrides
profile = "dev-cluster"

server_count         = 3
client_count         = 4
windows_client_count = 1

nomad_version = "0.12.1"
nomad_version_server = [
  "0.12.0", # override servers 1 and 2
  "0.12.3",
]
nomad_version_client_linux = [
  "0.12.0", # override linux client 1 and 2
  "0.12.3"
]
nomad_version_client_windows = [
  "0.12.3", # override windows client 1
  "0.12.4" # ignored
]
@@ -1,12 +0,0 @@
{
  "provisioning": {
    "nomad_client_linux[0]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster",
    "nomad_client_linux[1]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster",
    "nomad_client_linux[2]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster",
    "nomad_client_linux[3]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster",
    "nomad_client_windows[0]": "C:/opt/provision.ps1 -nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf -config_profile dev-cluster",
    "nomad_server[0]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster",
    "nomad_server[1]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster",
    "nomad_server[2]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster"
  }
}
@@ -1,8 +0,0 @@
# test: install a specific Nomad sha
profile = "dev-cluster"

server_count         = 3
client_count         = 4
windows_client_count = 1

nomad_sha = "2a6e62be00a0db228d8add74ceca6ca83c8efdcf"
@@ -1,12 +0,0 @@
{
  "provisioning": {
    "nomad_client_linux[0]": "/opt/provision.sh --nomad_sha 920f00da22726914e504d016bb588ca9c18240f2 --config_profile dev-cluster",
    "nomad_client_linux[1]": "/opt/provision.sh --nomad_sha 568c4aa72b51050913365dae6b3b1d089d39b2a5 --config_profile dev-cluster",
    "nomad_client_linux[2]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster",
    "nomad_client_linux[3]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster",
    "nomad_client_windows[0]": "C:/opt/provision.ps1 -nomad_sha 920f00da22726914e504d016bb588ca9c18240f2 -config_profile dev-cluster",
    "nomad_server[0]": "/opt/provision.sh --nomad_sha 920f00da22726914e504d016bb588ca9c18240f2 --config_profile dev-cluster",
    "nomad_server[1]": "/opt/provision.sh --nomad_sha 568c4aa72b51050913365dae6b3b1d089d39b2a5 --config_profile dev-cluster",
    "nomad_server[2]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster"
  }
}
@@ -1,20 +0,0 @@
# test: install a specific Nomad sha with overrides
profile = "dev-cluster"

server_count         = 3
client_count         = 4
windows_client_count = 1

nomad_sha = "2a6e62be00a0db228d8add74ceca6ca83c8efdcf"
nomad_sha_server = [
  "920f00da22726914e504d016bb588ca9c18240f2", # override server 1 and 2
  "568c4aa72b51050913365dae6b3b1d089d39b2a5",
]
nomad_sha_client_linux = [
  "920f00da22726914e504d016bb588ca9c18240f2", # override client 1 and 2
  "568c4aa72b51050913365dae6b3b1d089d39b2a5",
]
nomad_sha_client_windows = [
  "920f00da22726914e504d016bb588ca9c18240f2", # override windows client
  "568c4aa72b51050913365dae6b3b1d089d39b2a5", # ignored
]
@@ -1,12 +0,0 @@
{
  "provisioning": {
    "nomad_client_linux[0]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster",
    "nomad_client_linux[1]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster",
    "nomad_client_linux[2]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster",
    "nomad_client_linux[3]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster",
    "nomad_client_windows[0]": "C:/opt/provision.ps1 -nomad_binary ./mock-1 -config_profile dev-cluster",
    "nomad_server[0]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster",
    "nomad_server[1]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster",
    "nomad_server[2]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster"
  }
}
@@ -1,8 +0,0 @@
# test: install a local Nomad binary
profile = "dev-cluster"

server_count         = 3
client_count         = 4
windows_client_count = 1

nomad_local_binary = "./mock-1"
@@ -1,12 +0,0 @@
{
  "provisioning": {
    "nomad_client_linux[0]": "/opt/provision.sh --nomad_binary ./mock-2 --config_profile dev-cluster",
    "nomad_client_linux[1]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster",
    "nomad_client_linux[2]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster",
    "nomad_client_linux[3]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster",
    "nomad_client_windows[0]": "C:/opt/provision.ps1 -nomad_binary ./mock-2 -config_profile dev-cluster",
    "nomad_server[0]": "/opt/provision.sh --nomad_binary ./mock-2 --config_profile dev-cluster",
    "nomad_server[1]": "/opt/provision.sh --nomad_binary ./mock-2 --config_profile dev-cluster",
    "nomad_server[2]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster"
  }
}
@@ -1,18 +0,0 @@
# test: install a local Nomad binary, with overrides
profile = "dev-cluster"

server_count         = 3
client_count         = 4
windows_client_count = 1

nomad_local_binary = "./mock-1"
nomad_local_binary_server = [
  "./mock-2", # override servers 1 and 2
  "./mock-2",
]
nomad_local_binary_client_linux = [
  "./mock-2" # override client 1
]
nomad_local_binary_client_windows = [
  "./mock-2" # override windows client
]
@@ -1,12 +0,0 @@
{
  "provisioning": {
    "nomad_client_linux[0]": "/opt/provision.sh --nomad_version 0.12.0 --config_profile full-cluster",
    "nomad_client_linux[1]": "/opt/provision.sh --nomad_version 0.12.3 --config_profile full-cluster",
    "nomad_client_linux[2]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile full-cluster",
    "nomad_client_linux[3]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile full-cluster",
    "nomad_client_windows[0]": "C:/opt/provision.ps1 -nomad_version 0.12.0 -config_profile full-cluster",
    "nomad_server[0]": "/opt/provision.sh --nomad_version 0.12.0 --config_profile full-cluster",
    "nomad_server[1]": "/opt/provision.sh --nomad_version 0.12.3 --config_profile full-cluster",
    "nomad_server[2]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile full-cluster"
  }
}
@@ -1,20 +0,0 @@
# test: install a specific Nomad version with indexed configs
profile = "full-cluster"

server_count         = 3
client_count         = 4
windows_client_count = 1

nomad_version = "0.12.1"
nomad_version_server = [
  "0.12.0", # override servers 1 and 2
  "0.12.3",
]
nomad_version_client_linux = [
  "0.12.0", # override clients 1 and 2
  "0.12.3",
]
nomad_version_client_windows = [
  "0.12.0", # override windows client
  "0.12.3", # ignored
]
@@ -1 +0,0 @@
../config
@@ -1 +0,0 @@
this is a mock file so that we can verify the checksum of a known file for testing
Some files were not shown because too many files have changed in this diff