Initial commit for Terraform/AWS support
6
terraform/README.md
Normal file
@@ -0,0 +1,6 @@
# Provision a Nomad cluster with Terraform

Easily provision a fully functional and integrated HashiCorp sandbox environment in the cloud. The goal is to allow easy exploration of Nomad, including its integrations with Consul and Vault. A number of [examples](examples/README.md) are included.

See the README in the [AWS](aws/README.md) subdirectory to get started.
96
terraform/aws/README.md
Normal file
@@ -0,0 +1,96 @@
## Deploy a Nomad cluster in AWS

Deploys one or more servers running Nomad, Consul and Vault, as well as a configurable number of clients.

### Setup

Clone the repo and (optionally) use the included Vagrantfile to bootstrap a local staging environment:

```bash
$ git clone git@github.com:hashicorp/nomad.git
$ cd nomad/terraform/aws
$ vagrant up && vagrant ssh
```

### Prerequisites

You will need the following:

- An AWS account
- [API access keys](http://aws.amazon.com/developers/access-keys/)
- An [SSH key pair](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)

Set the following environment variables:

```bash
$ export AWS_ACCESS_KEY_ID=[ACCESS_KEY_ID]
$ export AWS_SECRET_ACCESS_KEY=[SECRET_ACCESS_KEY]
```

If you provisioned a Vagrant environment, you will need to copy your private key to it.
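
One way to do this, sketched here with `vagrant ssh-config` and `scp` (`KEY` is a placeholder for your key pair name):

```bash
# Run on the host, from terraform/aws
$ vagrant ssh-config > /tmp/vagrant-ssh-config
$ scp -F /tmp/vagrant-ssh-config ~/.ssh/KEY.pem default:/home/vagrant/.ssh/KEY.pem
$ vagrant ssh -c 'chmod 600 /home/vagrant/.ssh/KEY.pem'
```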

### Provision

`cd` to one of the environment subdirectories:

```bash
$ cd aws/env/us-east
```

Update `terraform.tfvars` with your SSH key name:

```hcl
region = "us-east-1"
ami = "ami-62a60374"
instance_type = "t2.small"
key_name = "KEY"
key_file = "/home/vagrant/.ssh/KEY.pem"
server_count = "1"
client_count = "2"
```

For example:

```hcl
region = "us-east-1"
ami = "ami-62a60374"
instance_type = "t2.medium"
key_name = "hashi-us-east-1"
key_file = "/home/vagrant/.ssh/hashi-us-east-1.pem"
server_count = "1"
client_count = "2"
```

Provision:

```bash
terraform get
terraform plan
terraform apply
```

## Usage

SSH to a server using its public IP. For example:

```bash
$ ssh -i /home/vagrant/.ssh/KEY.pem ubuntu@SERVER_PUBLIC_IP
```
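
The server and client addresses are exposed as Terraform outputs (defined in `env/us-east/main.tf`), so one way to look up `SERVER_PUBLIC_IP` after provisioning is:

```bash
$ terraform output primary_server_public_ips
$ terraform output client_public_ips
```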

Optionally, initialize and unseal Vault:

```bash
$ vault init -key-shares=1 -key-threshold=1
$ vault unseal
$ export VAULT_TOKEN=[INITIAL_ROOT_TOKEN]
```

With `-key-shares=1 -key-threshold=1`, `vault init` prints a single unseal key along with the initial root token; paste the unseal key when `vault unseal` prompts for it.
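
To confirm that the Vault server is initialized and unsealed, a quick check with the standard CLI:

```bash
$ vault status
```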

Verify that Consul and Nomad are running:

```bash
$ consul members
$ nomad server-members
$ nomad node-status
```

See the [examples](../examples/README.md).
60
terraform/aws/Vagrantfile
vendored
Normal file
@@ -0,0 +1,60 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure(2) do |config|

  config.vm.box = "ubuntu/trusty64"
  # config.vm.network :forwarded_port, guest: 22, host: 1234
  config.vm.provision "shell", inline: <<-SHELL

    cd /tmp

    PACKERVERSION=0.12.1
    PACKERDOWNLOAD=https://releases.hashicorp.com/packer/${PACKERVERSION}/packer_${PACKERVERSION}_linux_amd64.zip
    TERRAFORMVERSION=0.8.1
    TERRAFORMDOWNLOAD=https://releases.hashicorp.com/terraform/${TERRAFORMVERSION}/terraform_${TERRAFORMVERSION}_linux_amd64.zip

    echo "Dependencies..."
    sudo apt-get install -y unzip tree

    # Disable the firewall
    sudo ufw disable

    ## Packer
    echo Fetching Packer...
    curl -L $PACKERDOWNLOAD > packer.zip
    echo Installing Packer...
    unzip packer.zip -d /usr/local/bin
    chmod 0755 /usr/local/bin/packer
    chown root:root /usr/local/bin/packer

    ## Terraform
    echo Fetching Terraform...
    curl -L $TERRAFORMDOWNLOAD > terraform.zip
    echo Installing Terraform...
    unzip terraform.zip -d /usr/local/bin
    chmod 0755 /usr/local/bin/terraform
    chown root:root /usr/local/bin/terraform

    ## Docker
    echo deb https://apt.dockerproject.org/repo ubuntu-`lsb_release -c | awk '{print $2}'` main | sudo tee /etc/apt/sources.list.d/docker.list
    sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
    sudo apt-get update
    sudo apt-get install -y docker-engine

  SHELL

  config.vm.synced_folder "../aws/", "/home/vagrant/aws", owner: "vagrant", group: "vagrant"
  config.vm.synced_folder "../shared/", "/home/vagrant/shared", owner: "vagrant", group: "vagrant"
  config.vm.synced_folder "../examples/", "/home/vagrant/examples", owner: "vagrant", group: "vagrant"

  # config.vm.provider "virtualbox" do |vb|
  #   vb.memory = "2048"
  #   vb.cpus = 2
  # end

end
53
terraform/aws/env/us-east/main.tf
vendored
Normal file
@@ -0,0 +1,53 @@
variable "region" {
  description = "The AWS region to deploy to."
  default     = "us-east-1"
}

variable "ami" { }

variable "instance_type" {
  description = "The AWS instance type to use for both clients and servers."
  default     = "t2.medium"
}

variable "key_name" { }

variable "key_file" { }

variable "server_count" {
  description = "The number of servers to provision."
  default     = "3"
}

variable "client_count" {
  description = "The number of clients to provision."
  default     = "4"
}

variable "cluster_tag_value" {
  description = "Used by Consul to automatically form a cluster."
  default     = "auto-join"
}

provider "aws" {
  region = "${var.region}"
}

module "hashistack" {

  source = "../../modules/hashistack"

  region            = "${var.region}"
  ami               = "${var.ami}"
  instance_type     = "${var.instance_type}"
  key_name          = "${var.key_name}"
  key_file          = "${var.key_file}"
  server_count      = "${var.server_count}"
  client_count      = "${var.client_count}"
  cluster_tag_value = "${var.cluster_tag_value}"
}

output "primary_server_private_ips" { value = "${module.hashistack.primary_server_private_ips}" }
output "primary_server_public_ips"  { value = "${module.hashistack.primary_server_public_ips}" }
output "client_private_ips"         { value = "${module.hashistack.client_private_ips}" }
output "client_public_ips"          { value = "${module.hashistack.client_public_ips}" }
8
terraform/aws/env/us-east/terraform.tfvars
vendored
Normal file
@@ -0,0 +1,8 @@
region = "us-east-1"
ami = "ami-28a1dd3e"
instance_type = "t2.micro"
key_name = "hashi-us-east-1"
key_file = "/home/vagrant/.ssh/hashi-us-east-1.pem"
server_count = "1"
client_count = "1"
cluster_tag_value = "auto-join"
6
terraform/aws/env/us-east/user-data-client.sh
vendored
Normal file
@@ -0,0 +1,6 @@
#!/bin/bash

set -e

# Log all user-data output to /var/log/user-data.log, syslog, and the console
exec > >(sudo tee /var/log/user-data.log | logger -t user-data -s 2>/dev/console) 2>&1
sudo bash /ops/shared/scripts/client.sh "${region}" "${cluster_tag_value}"
6
terraform/aws/env/us-east/user-data-server.sh
vendored
Normal file
@@ -0,0 +1,6 @@
#!/bin/bash

set -e

# Log all user-data output to /var/log/user-data.log, syslog, and the console
exec > >(sudo tee /var/log/user-data.log | logger -t user-data -s 2>/dev/console) 2>&1
sudo bash /ops/shared/scripts/server.sh "${server_count}" "${region}" "${cluster_tag_value}"
149
terraform/aws/modules/hashistack/hashistack.tf
Normal file
@@ -0,0 +1,149 @@
variable "region" { }
variable "ami" { }
variable "instance_type" { }
variable "key_name" { }
variable "key_file" { }
variable "server_count" { }
variable "client_count" { }
variable "cluster_tag_value" { }

data "aws_vpc" "default" {
  default = true
}

resource "aws_security_group" "primary" {
  name   = "hashistack"
  vpc_id = "${data.aws_vpc.default.id}"

  # SSH from anywhere
  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # All traffic within the security group
  ingress {
    from_port = 0
    to_port   = 0
    protocol  = "-1"
    self      = true
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

data "template_file" "user_data_server_primary" {
  template = "${file("${path.root}/user-data-server.sh")}"

  vars {
    server_count      = "${var.server_count}"
    region            = "${var.region}"
    cluster_tag_value = "${var.cluster_tag_value}"
  }
}

data "template_file" "user_data_client" {
  template = "${file("${path.root}/user-data-client.sh")}"

  vars {
    region            = "${var.region}"
    cluster_tag_value = "${var.cluster_tag_value}"
  }
}

resource "aws_instance" "primary" {
  ami                    = "${var.ami}"
  instance_type          = "${var.instance_type}"
  key_name               = "${var.key_name}"
  vpc_security_group_ids = ["${aws_security_group.primary.id}"]
  count                  = "${var.server_count}"

  connection {
    user        = "ubuntu"
    private_key = "${file("${var.key_file}")}"
  }

  # Instance tags
  tags {
    Name           = "hashistack-server-${count.index}"
    ConsulAutoJoin = "${var.cluster_tag_value}"
  }

  user_data            = "${data.template_file.user_data_server_primary.rendered}"
  iam_instance_profile = "${aws_iam_instance_profile.instance_profile.name}"
}

resource "aws_instance" "client" {
  ami                    = "${var.ami}"
  instance_type          = "${var.instance_type}"
  key_name               = "${var.key_name}"
  vpc_security_group_ids = ["${aws_security_group.primary.id}"]
  count                  = "${var.client_count}"
  depends_on             = ["aws_instance.primary"]

  connection {
    user        = "ubuntu"
    private_key = "${file("${var.key_file}")}"
  }

  # Instance tags
  tags {
    Name           = "hashistack-client-${count.index}"
    ConsulAutoJoin = "${var.cluster_tag_value}"
  }

  user_data            = "${data.template_file.user_data_client.rendered}"
  iam_instance_profile = "${aws_iam_instance_profile.instance_profile.name}"
}

resource "aws_iam_instance_profile" "instance_profile" {
  name_prefix = "hashistack"
  role        = "${aws_iam_role.instance_role.name}"
}

resource "aws_iam_role" "instance_role" {
  name_prefix        = "hashistack"
  assume_role_policy = "${data.aws_iam_policy_document.instance_role.json}"
}

data "aws_iam_policy_document" "instance_role" {
  statement {
    effect  = "Allow"
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["ec2.amazonaws.com"]
    }
  }
}

# Allows Consul on each node to discover peers via EC2 tags (retry_join_ec2)
resource "aws_iam_role_policy" "auto_discover_cluster" {
  name   = "auto-discover-cluster"
  role   = "${aws_iam_role.instance_role.id}"
  policy = "${data.aws_iam_policy_document.auto_discover_cluster.json}"
}

data "aws_iam_policy_document" "auto_discover_cluster" {
  statement {
    effect = "Allow"

    actions = [
      "ec2:DescribeInstances",
      "ec2:DescribeTags",
      "autoscaling:DescribeAutoScalingGroups",
    ]

    resources = ["*"]
  }
}

output "primary_server_private_ips" { value = ["${aws_instance.primary.*.private_ip}"] }
output "primary_server_public_ips"  { value = ["${aws_instance.primary.*.public_ip}"] }
output "client_private_ips"         { value = ["${aws_instance.client.*.private_ip}"] }
output "client_public_ips"          { value = ["${aws_instance.client.*.public_ip}"] }
33
terraform/aws/packer/packer.json
Normal file
@@ -0,0 +1,33 @@
{
  "builders": [{
    "type": "amazon-ebs",
    "region": "us-east-1",
    "source_ami": "ami-8e0b9499",
    "instance_type": "t2.medium",
    "ssh_username": "ubuntu",
    "ami_name": "hashicorp-sandbox {{timestamp}}",
    "ami_groups": ["all"]
  }],
  "provisioners": [
    {
      "type": "shell",
      "inline": [
        "sudo mkdir /ops",
        "sudo chmod 777 /ops"
      ]
    },
    {
      "type": "file",
      "source": "../../shared",
      "destination": "/ops"
    },
    {
      "type": "file",
      "source": "../../examples",
      "destination": "/ops"
    },
    {
      "type": "shell",
      "script": "../../shared/scripts/setup.sh"
    }]
}
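
Assuming Packer is installed and the same AWS credentials are exported, the AMI can be rebuilt from this template with the standard Packer workflow:

```bash
$ cd terraform/aws/packer
$ packer build packer.json
```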
6
terraform/examples/README.md
Normal file
@@ -0,0 +1,6 @@
## Examples

The examples included here are designed to introduce specific features and provide a basic learning experience. The examples subdirectory is automatically provisioned into the home directory of the VMs in your [AWS](../aws/README.md) environment.

- Nomad
  - [Spark Integration](spark/README.md)
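
Per the provisioning scripts below, the examples land under the `ubuntu` user's home directory, so after SSHing to a server you can list them with, for example:

```bash
$ ls ~/examples
```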
79
terraform/examples/spark/README.md
Normal file
@@ -0,0 +1,79 @@
## Spark integration

`cd` to `examples/spark/spark` on one of the servers. The `spark/spark` subdirectory will be created when the cluster is provisioned.

You can use the `spark-submit` commands below to run several of the official Spark examples against Nomad. You can monitor Nomad status simultaneously with:

```bash
$ nomad status
$ nomad status [JOB_ID]
$ nomad alloc-status [ALLOC_ID]
```

### SparkPi

Java

```bash
$ ./bin/spark-submit --class org.apache.spark.examples.JavaSparkPi --master nomad --conf spark.executor.instances=8 --conf spark.nomad.sparkDistribution=https://s3.amazonaws.com/rcgenova-nomad-spark/spark-2.1.0-SNAPSHOT-bin-nomad-spark.tgz examples/jars/spark-examples*.jar 100
```

Python

```bash
$ ./bin/spark-submit --master nomad --conf spark.executor.instances=8 --conf spark.nomad.sparkDistribution=https://s3.amazonaws.com/rcgenova-nomad-spark/spark-2.1.0-SNAPSHOT-bin-nomad-spark.tgz examples/src/main/python/pi.py 100
```

Scala

```bash
$ ./bin/spark-submit --class org.apache.spark.examples.SparkPi --master nomad --conf spark.executor.instances=8 --conf spark.nomad.sparkDistribution=https://s3.amazonaws.com/rcgenova-nomad-spark/spark-2.1.0-SNAPSHOT-bin-nomad-spark.tgz examples/jars/spark-examples*.jar 100
```

### Machine Learning

Python

```bash
$ ./bin/spark-submit --master nomad --conf spark.executor.instances=8 --conf spark.nomad.sparkDistribution=https://s3.amazonaws.com/rcgenova-nomad-spark/spark-2.1.0-SNAPSHOT-bin-nomad-spark.tgz examples/src/main/python/ml/logistic_regression_with_elastic_net.py
```

Scala

```bash
$ ./bin/spark-submit --class org.apache.spark.examples.SparkLR --master nomad --conf spark.executor.instances=8 --conf spark.nomad.sparkDistribution=https://s3.amazonaws.com/rcgenova-nomad-spark/spark-2.1.0-SNAPSHOT-bin-nomad-spark.tgz examples/jars/spark-examples*.jar
```

### pyspark

Launch an interactive Python shell against Nomad, then run the DataFrame example:

```bash
$ ./bin/pyspark --master nomad --conf spark.executor.instances=8 --conf spark.nomad.sparkDistribution=https://s3.amazonaws.com/rcgenova-nomad-spark/spark-2.1.0-SNAPSHOT-bin-nomad-spark.tgz
```

```python
df = spark.read.json("examples/src/main/resources/people.json")
df.show()
df.printSchema()
df.createOrReplaceTempView("people")
sqlDF = spark.sql("SELECT * FROM people")
sqlDF.show()
```

### spark-shell

Launch an interactive Scala shell against Nomad, then try a simple RDD operation:

```bash
$ ./bin/spark-shell --master nomad --conf spark.executor.instances=8 --conf spark.nomad.sparkDistribution=https://s3.amazonaws.com/rcgenova-nomad-spark/spark-2.1.0-SNAPSHOT-bin-nomad-spark.tgz
```

```scala
:type spark
spark.version

val data = 1 to 10000
val distData = sc.parallelize(data)
distData.filter(_ < 10).collect()
```

### spark-sql

```bash
$ ./bin/spark-sql --master nomad --conf spark.executor.instances=8 --conf spark.nomad.sparkDistribution=https://s3.amazonaws.com/rcgenova-nomad-spark/spark-2.1.0-SNAPSHOT-bin-nomad-spark.tgz jars/spark-sql_2.11-2.1.0-SNAPSHOT.jar
```
BIN
terraform/examples/spark/RunningSparkOnNomad.pdf
Normal file
Binary file not shown.
18
terraform/shared/config/consul.json
Normal file
@@ -0,0 +1,18 @@
{
  "log_level": "INFO",
  "server": true,
  "data_dir": "/opt/consul/data",
  "bind_addr": "0.0.0.0",
  "client_addr": "IP_ADDRESS",
  "advertise_addr": "IP_ADDRESS",
  "bootstrap_expect": SERVER_COUNT,
  "node_name": "consul@IP_ADDRESS",
  "service": {
    "name": "consul"
  },
  "retry_join_ec2": {
    "tag_key": "ConsulAutoJoin",
    "tag_value": "CLUSTER_TAG_VALUE",
    "region": "REGION"
  }
}
12
terraform/shared/config/consul_client.json
Normal file
@@ -0,0 +1,12 @@
{
  "log_level": "INFO",
  "data_dir": "/opt/consul/data",
  "bind_addr": "0.0.0.0",
  "advertise_addr": "IP_ADDRESS",
  "node_name": "consul@IP_ADDRESS",
  "retry_join_ec2": {
    "tag_key": "ConsulAutoJoin",
    "tag_value": "CLUSTER_TAG_VALUE",
    "region": "REGION"
  }
}
22
terraform/shared/config/consul_upstart.conf
Normal file
@@ -0,0 +1,22 @@
description "Consul"

start on runlevel [2345]
stop on runlevel [!2345]

respawn

console log

script
  if [ -f "/etc/service/consul" ]; then
    . /etc/service/consul
  fi

  # Make sure to use all our CPUs, because Consul can block a scheduler thread
  export GOMAXPROCS=`nproc`

  exec /usr/local/bin/consul agent \
    -config-dir="/etc/consul.d" \
    ${CONSUL_FLAGS} \
    >>/var/log/consul.log 2>&1
end script
23
terraform/shared/config/nomad.hcl
Normal file
@@ -0,0 +1,23 @@
data_dir = "/opt/nomad/data"
bind_addr = "IP_ADDRESS"

# Enable the server
server {
  enabled = true
  bootstrap_expect = SERVER_COUNT
}

name = "nomad@IP_ADDRESS"

consul {
  address = "IP_ADDRESS:8500"
}

vault {
  enabled = false
  address = "http://IP_ADDRESS:8200"
  task_token_ttl = "1h"
  create_from_role = "nomad-cluster"
  token = ""
}
28
terraform/shared/config/nomad_client.hcl
Normal file
@@ -0,0 +1,28 @@
data_dir = "/opt/nomad/data"
bind_addr = "IP_ADDRESS"
name = "nomad@IP_ADDRESS"

# Enable the client
client {
  enabled = true

  # Expose host paths inside the exec driver chroot
  chroot_env {
    "/bin" = "/bin"
    "/etc" = "/etc"
    "/home" = "/home"
    "/lib" = "/lib"
    "/lib32" = "/lib32"
    "/lib64" = "/lib64"
    "/run/resolvconf" = "/run/resolvconf"
    "/sbin" = "/sbin"
    "/usr" = "/usr"
  }
}

consul {
  address = "127.0.0.1:8500"
}

vault {
  enabled = true
  address = "http://SERVER_IP_ADDRESS:8200"
}
22
terraform/shared/config/nomad_upstart.conf
Normal file
@@ -0,0 +1,22 @@
description "Nomad"

start on runlevel [2345]
stop on runlevel [!2345]

respawn

console log

script
  if [ -f "/etc/service/nomad" ]; then
    . /etc/service/nomad
  fi

  # Make sure to use all our CPUs, because Nomad can block a scheduler thread
  export GOMAXPROCS=`nproc`

  exec /usr/local/bin/nomad agent \
    -config="/etc/nomad.d/nomad.hcl" \
    ${NOMAD_FLAGS} \
    >>/var/log/nomad.log 2>&1
end script
12
terraform/shared/config/vault.hcl
Normal file
@@ -0,0 +1,12 @@
backend "consul" {
  path = "vault/"
  address = "IP_ADDRESS:8500"
  cluster_addr = "https://IP_ADDRESS:8201"
  redirect_addr = "http://IP_ADDRESS:8200"
}

listener "tcp" {
  address = "IP_ADDRESS:8200"
  cluster_address = "IP_ADDRESS:8201"
  tls_disable = 1
}
22
terraform/shared/config/vault_upstart.conf
Normal file
@@ -0,0 +1,22 @@
description "Vault"

start on runlevel [2345]
stop on runlevel [!2345]

respawn

console log

script
  if [ -f "/etc/service/vault" ]; then
    . /etc/service/vault
  fi

  # Make sure to use all our CPUs, because Vault can block a scheduler thread
  export GOMAXPROCS=`nproc`

  exec /usr/local/bin/vault server \
    -config="/etc/vault.d/vault.hcl" \
    ${VAULT_FLAGS} \
    >>/var/log/vault.log 2>&1
end script
45
terraform/shared/scripts/client.sh
Normal file
@@ -0,0 +1,45 @@
#!/bin/bash

CONFIGDIR=/ops/shared/config
CONSULCONFIGDIR=/etc/consul.d
NOMADCONFIGDIR=/etc/nomad.d
HOME_DIR=ubuntu

IP_ADDRESS=$(curl http://instance-data/latest/meta-data/local-ipv4)
REGION=$1
CLUSTER_TAG_VALUE=$2

# Consul
sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/consul_client.json
sed -i "s/REGION/$REGION/g" $CONFIGDIR/consul_client.json
sed -i "s/CLUSTER_TAG_VALUE/$CLUSTER_TAG_VALUE/g" $CONFIGDIR/consul_client.json
sudo cp $CONFIGDIR/consul_client.json $CONSULCONFIGDIR/consul.json
sudo cp $CONFIGDIR/consul_upstart.conf /etc/init/consul.conf

sudo service consul start
sleep 10

# Nomad
# NOTE: SERVER_IP_ADDRESS is expected in the environment; if unset, the Vault
# address in nomad_client.hcl is substituted with an empty host.
sed -i "s/SERVER_IP_ADDRESS/$SERVER_IP_ADDRESS/g" $CONFIGDIR/nomad_client.hcl
sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/nomad_client.hcl
sudo cp $CONFIGDIR/nomad_client.hcl $NOMADCONFIGDIR/nomad.hcl
sudo cp $CONFIGDIR/nomad_upstart.conf /etc/init/nomad.conf

sudo service nomad start
sleep 10
export NOMAD_ADDR=http://$IP_ADDRESS:4646

# Set env vars in bashrc

echo "export VAULT_ADDR=http://$IP_ADDRESS:8200" | sudo tee --append /home/$HOME_DIR/.bashrc
echo "export NOMAD_ADDR=http://$IP_ADDRESS:4646" | sudo tee --append /home/$HOME_DIR/.bashrc
echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/jre" | sudo tee --append /home/$HOME_DIR/.bashrc

# Move examples directory to $HOME

sudo mv /ops/examples /home/$HOME_DIR
sudo chown -R $HOME_DIR:$HOME_DIR /home/$HOME_DIR/examples
sudo chmod -R 775 /home/$HOME_DIR/examples

# Copy transcode.sh to /usr/bin
# sudo cp /home/$HOME_DIR/examples/nomad/dispatch/bin/transcode.sh /usr/bin/transcode.sh
53
terraform/shared/scripts/server.sh
Normal file
@@ -0,0 +1,53 @@
#!/bin/bash

CONFIGDIR=/ops/shared/config
CONSULCONFIGDIR=/etc/consul.d
VAULTCONFIGDIR=/etc/vault.d
NOMADCONFIGDIR=/etc/nomad.d
HOME_DIR=ubuntu

IP_ADDRESS=$(curl http://instance-data/latest/meta-data/local-ipv4)
SERVER_COUNT=$1
REGION=$2
CLUSTER_TAG_VALUE=$3

# Consul
sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/consul.json
sed -i "s/SERVER_COUNT/$SERVER_COUNT/g" $CONFIGDIR/consul.json
sed -i "s/REGION/$REGION/g" $CONFIGDIR/consul.json
sed -i "s/CLUSTER_TAG_VALUE/$CLUSTER_TAG_VALUE/g" $CONFIGDIR/consul.json
sudo cp $CONFIGDIR/consul.json $CONSULCONFIGDIR
sudo cp $CONFIGDIR/consul_upstart.conf /etc/init/consul.conf

sudo service consul start
sleep 20
export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500
export CONSUL_RPC_ADDR=$IP_ADDRESS:8400

# Vault
sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/vault.hcl
sudo cp $CONFIGDIR/vault.hcl $VAULTCONFIGDIR
sudo cp $CONFIGDIR/vault_upstart.conf /etc/init/vault.conf

sudo service vault start

# Nomad
sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/nomad.hcl
sed -i "s/SERVER_COUNT/$SERVER_COUNT/g" $CONFIGDIR/nomad.hcl
sudo cp $CONFIGDIR/nomad.hcl $NOMADCONFIGDIR
sudo cp $CONFIGDIR/nomad_upstart.conf /etc/init/nomad.conf

sudo service nomad start
sleep 10
export NOMAD_ADDR=http://$IP_ADDRESS:4646

# Set env vars in bashrc
echo "export CONSUL_RPC_ADDR=$IP_ADDRESS:8400" | sudo tee --append /home/$HOME_DIR/.bashrc
echo "export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500" | sudo tee --append /home/$HOME_DIR/.bashrc
echo "export VAULT_ADDR=http://$IP_ADDRESS:8200" | sudo tee --append /home/$HOME_DIR/.bashrc
echo "export NOMAD_ADDR=http://$IP_ADDRESS:4646" | sudo tee --append /home/$HOME_DIR/.bashrc
echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/jre" | sudo tee --append /home/$HOME_DIR/.bashrc

# Move examples directory to $HOME
sudo mv /ops/examples /home/$HOME_DIR
sudo chown -R $HOME_DIR:$HOME_DIR /home/$HOME_DIR/examples
124
terraform/shared/scripts/setup.sh
Normal file
@@ -0,0 +1,124 @@
#!/bin/bash
set -e

cd /ops

CONFIGDIR=/ops/shared/config

CONSULVERSION=0.7.5
CONSULDOWNLOAD=https://releases.hashicorp.com/consul/${CONSULVERSION}/consul_${CONSULVERSION}_linux_amd64.zip
CONSULCONFIGDIR=/etc/consul.d
CONSULDIR=/opt/consul

VAULTVERSION=0.6.5
VAULTDOWNLOAD=https://releases.hashicorp.com/vault/${VAULTVERSION}/vault_${VAULTVERSION}_linux_amd64.zip
VAULTCONFIGDIR=/etc/vault.d
VAULTDIR=/opt/vault

NOMADVERSION=0.5.6
NOMADDOWNLOAD=https://releases.hashicorp.com/nomad/${NOMADVERSION}/nomad_${NOMADVERSION}_linux_amd64.zip
NOMADCONFIGDIR=/etc/nomad.d
NOMADDIR=/opt/nomad

PACKERVERSION=0.12.3
PACKERDOWNLOAD=https://releases.hashicorp.com/packer/${PACKERVERSION}/packer_${PACKERVERSION}_linux_amd64.zip

echo Dependencies...
sudo apt-get install -y software-properties-common
sudo add-apt-repository ppa:mc3man/trusty-media
sudo apt-get update
sudo apt-get install -y unzip tree redis-tools jq s3cmd ffmpeg

# Numpy
sudo apt-get install -y python-setuptools
sudo easy_install pip
sudo pip install numpy

# Instead of a symlink, move ffmpeg to be inside the chroot for Nomad
sudo rm /usr/bin/ffmpeg
sudo cp /opt/ffmpeg/bin/ffmpeg /usr/bin/ffmpeg

# Disable the firewall
sudo ufw disable

## Consul

echo Fetching Consul...
curl -L $CONSULDOWNLOAD > consul.zip

echo Installing Consul...
sudo unzip consul.zip -d /usr/local/bin
sudo chmod 0755 /usr/local/bin/consul
sudo chown root:root /usr/local/bin/consul

echo Configuring Consul...
sudo mkdir -p $CONSULCONFIGDIR
sudo chmod 755 $CONSULCONFIGDIR
sudo mkdir -p $CONSULDIR
sudo chmod 755 $CONSULDIR

## Vault

echo Fetching Vault...
curl -L $VAULTDOWNLOAD > vault.zip

echo Installing Vault...
sudo unzip vault.zip -d /usr/local/bin
sudo chmod 0755 /usr/local/bin/vault
sudo chown root:root /usr/local/bin/vault

echo Configuring Vault...
sudo mkdir -p $VAULTCONFIGDIR
sudo chmod 755 $VAULTCONFIGDIR
sudo mkdir -p $VAULTDIR
sudo chmod 755 $VAULTDIR

## Nomad

echo Fetching Nomad...
curl -L $NOMADDOWNLOAD > nomad.zip

echo Installing Nomad...
sudo unzip nomad.zip -d /usr/local/bin
sudo chmod 0755 /usr/local/bin/nomad
sudo chown root:root /usr/local/bin/nomad

echo Configuring Nomad...
sudo mkdir -p $NOMADCONFIGDIR
sudo chmod 755 $NOMADCONFIGDIR
sudo mkdir -p $NOMADDIR
sudo chmod 755 $NOMADDIR

## Packer

echo Fetching Packer...
curl -L $PACKERDOWNLOAD > packer.zip

echo Installing Packer...
sudo unzip packer.zip -d /usr/local/bin
sudo chmod 0755 /usr/local/bin/packer
sudo chown root:root /usr/local/bin/packer

## Docker

echo deb https://apt.dockerproject.org/repo ubuntu-`lsb_release -c | awk '{print $2}'` main | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
sudo apt-get update
sudo apt-get install -y docker-engine

## Java

sudo add-apt-repository -y ppa:openjdk-r/ppa
sudo apt-get update
sudo apt-get install -y openjdk-8-jdk
JAVA_HOME=$(readlink -f /usr/bin/java | sed "s:bin/java::")

## Download and unpack Spark

sudo wget -P /ops/examples/spark https://s3.amazonaws.com/rcgenova-nomad-spark/spark-2.1.0-bin-nomad-preview-6.tgz
sudo tar -xvf /ops/examples/spark/spark-2.1.0-bin-nomad-preview-6.tgz --directory /ops/examples/spark
sudo mv /ops/examples/spark/spark-2.1.0-bin-nomad-preview-6 /ops/examples/spark/spark
sudo rm /ops/examples/spark/spark-2.1.0-bin-nomad-preview-6.tgz