mirror of
https://github.com/kemko/nomad.git
synced 2026-01-08 11:25:41 +03:00
base podman e2e test and provisioning updates (#8104)
* initial setup for terraform to install the podman task driver * Update e2e provisioning to support root podman. Excludes setup for rootless podman. Updates the source AMI to Ubuntu 18.04. Installs podman and configures podman varlink. Adds a base podman test that ensures the client status is running. Reverts terraform directory changes. * back out random go-discover go mod change * include podman varlink docs * address comments
This commit is contained in:
@@ -20,6 +20,7 @@ import (
|
||||
_ "github.com/hashicorp/nomad/e2e/metrics"
|
||||
_ "github.com/hashicorp/nomad/e2e/nomad09upgrade"
|
||||
_ "github.com/hashicorp/nomad/e2e/nomadexec"
|
||||
_ "github.com/hashicorp/nomad/e2e/podman"
|
||||
_ "github.com/hashicorp/nomad/e2e/spread"
|
||||
_ "github.com/hashicorp/nomad/e2e/systemsched"
|
||||
_ "github.com/hashicorp/nomad/e2e/taskevents"
|
||||
|
||||
28
e2e/podman/input/redis.nomad
Normal file
28
e2e/podman/input/redis.nomad
Normal file
@@ -0,0 +1,28 @@
|
||||
# Minimal service job used by the podman e2e suite: runs a single redis
# container under the podman task driver and exposes its port to Nomad.
job "redis" {
  datacenters = ["dc1"]
  type = "service"

  group "redis" {
    task "redis" {
      # Requires the nomad-driver-podman plugin in the client's plugin_dir.
      driver = "podman"

      config {
        # podman pulls the image via the docker:// transport.
        image = "docker://redis"

        # Map the task's "redis" port label to the container port 6379.
        port_map {
          redis = 6379
        }
      }

      resources {
        cpu = 500    # MHz
        memory = 256 # MB

        network {
          mbits = 20
          # Dynamically allocated host port; bound to container port 6379
          # through the port_map stanza above.
          port "redis" {}
        }
      }
    }
  }
}
|
||||
78
e2e/podman/podman.go
Normal file
78
e2e/podman/podman.go
Normal file
@@ -0,0 +1,78 @@
|
||||
package podman
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/nomad/e2e/e2eutil"
|
||||
"github.com/hashicorp/nomad/e2e/framework"
|
||||
"github.com/hashicorp/nomad/helper/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// PodmanTest is the e2e framework test case for workloads run under the
// podman task driver.
type PodmanTest struct {
	framework.TC
	// jobIDs tracks every job registered during a test so AfterEach can
	// deregister and garbage-collect them.
	jobIDs []string
}
|
||||
|
||||
func init() {
|
||||
framework.AddSuites(&framework.TestSuite{
|
||||
Component: "Podman",
|
||||
CanRunLocal: true,
|
||||
Cases: []framework.TestCase{
|
||||
new(PodmanTest),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (tc *PodmanTest) BeforeAll(f *framework.F) {
|
||||
e2eutil.WaitForLeader(f.T(), tc.Nomad())
|
||||
e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 2)
|
||||
}
|
||||
|
||||
func (tc *PodmanTest) TestRedisDeployment(f *framework.F) {
|
||||
t := f.T()
|
||||
nomadClient := tc.Nomad()
|
||||
uuid := uuid.Generate()
|
||||
jobID := "deployment" + uuid[0:8]
|
||||
tc.jobIDs = append(tc.jobIDs, jobID)
|
||||
e2eutil.RegisterAndWaitForAllocs(t, nomadClient, "podman/input/redis.nomad", jobID, "")
|
||||
ds := e2eutil.DeploymentsForJob(t, nomadClient, jobID)
|
||||
require.Equal(t, 1, len(ds))
|
||||
|
||||
jobs := nomadClient.Jobs()
|
||||
allocs, _, err := jobs.Allocations(jobID, true, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
var allocIDs []string
|
||||
for _, alloc := range allocs {
|
||||
allocIDs = append(allocIDs, alloc.ID)
|
||||
}
|
||||
|
||||
// Wait for allocations to get past initial pending state
|
||||
e2eutil.WaitForAllocsNotPending(t, nomadClient, allocIDs)
|
||||
|
||||
jobs = nomadClient.Jobs()
|
||||
allocs, _, err = jobs.Allocations(jobID, true, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, allocs, 1)
|
||||
require.Equal(t, allocs[0].ClientStatus, "running")
|
||||
}
|
||||
|
||||
func (tc *PodmanTest) AfterEach(f *framework.F) {
|
||||
nomadClient := tc.Nomad()
|
||||
|
||||
// Mark all nodes eligible
|
||||
nodesAPI := tc.Nomad().Nodes()
|
||||
nodes, _, _ := nodesAPI.List(nil)
|
||||
for _, node := range nodes {
|
||||
nodesAPI.ToggleEligibility(node.ID, true, nil)
|
||||
}
|
||||
|
||||
jobs := nomadClient.Jobs()
|
||||
// Stop all jobs in test
|
||||
for _, id := range tc.jobIDs {
|
||||
jobs.Deregister(id, true, nil)
|
||||
}
|
||||
tc.jobIDs = []string{}
|
||||
// Garbage collect
|
||||
nomadClient.System().GarbageCollect()
|
||||
}
|
||||
@@ -136,9 +136,9 @@ ssh into nodes with:
|
||||
ssh -i keys/${local.random_name}.pem ubuntu@${aws_instance.server[0].public_ip}
|
||||
|
||||
# clients
|
||||
%{ for ip in aws_instance.client_linux.*.public_ip ~}
|
||||
%{for ip in aws_instance.client_linux.*.public_ip~}
|
||||
ssh -i keys/${local.random_name}.pem ubuntu@${ip}
|
||||
%{ endfor ~}
|
||||
%{endfor~}
|
||||
```
|
||||
EOM
|
||||
|
||||
|
||||
@@ -4,17 +4,17 @@ set -e
|
||||
|
||||
# Disable interactive apt prompts
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections
|
||||
|
||||
|
||||
sudo mkdir -p /ops/shared
|
||||
sudo chown -R ubuntu:ubuntu /ops/shared
|
||||
|
||||
cd /ops
|
||||
|
||||
CONSULVERSION=1.7.3
|
||||
CONSULDOWNLOAD=https://releases.hashicorp.com/consul/${CONSULVERSION}/consul_${CONSULVERSION}_linux_amd64.zip
|
||||
CONSULCONFIGDIR=/etc/consul.d
|
||||
CONSULDIR=/opt/consul
|
||||
|
||||
VAULTVERSION=1.1.1
|
||||
VAULTDOWNLOAD=https://releases.hashicorp.com/vault/${VAULTVERSION}/vault_${VAULTVERSION}_linux_amd64.zip
|
||||
VAULTCONFIGDIR=/etc/vault.d
|
||||
@@ -25,15 +25,15 @@ NOMADVERSION=0.9.1
|
||||
NOMADDOWNLOAD=https://releases.hashicorp.com/nomad/${NOMADVERSION}/nomad_${NOMADVERSION}_linux_amd64.zip
|
||||
NOMADCONFIGDIR=/etc/nomad.d
|
||||
NOMADDIR=/opt/nomad
|
||||
NOMADPLUGINDIR=/opt/nomad/plugins
|
||||
|
||||
# Dependencies
|
||||
sudo apt-get install -y software-properties-common
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y unzip tree redis-tools jq curl tmux awscli nfs-common
|
||||
sudo apt-get install -y dnsmasq unzip tree redis-tools jq curl tmux awscli nfs-common
|
||||
|
||||
# Numpy (for Spark)
|
||||
sudo apt-get install -y python-setuptools
|
||||
sudo easy_install pip
|
||||
sudo apt-get install -y python-setuptools python-pip
|
||||
sudo pip install numpy
|
||||
|
||||
# Install sockaddr
|
||||
@@ -80,6 +80,8 @@ sudo mkdir -p $NOMADCONFIGDIR
|
||||
sudo chmod 755 $NOMADCONFIGDIR
|
||||
sudo mkdir -p $NOMADDIR
|
||||
sudo chmod 755 $NOMADDIR
|
||||
sudo mkdir -p $NOMADPLUGINDIR
|
||||
sudo chmod 755 $NOMADPLUGINDIR
|
||||
|
||||
echo "Install Docker"
|
||||
distro=$(lsb_release -si | tr '[:upper:]' '[:lower:]')
|
||||
@@ -108,6 +110,48 @@ sudo mkdir -p "$HADOOPCONFIGDIR"
|
||||
|
||||
wget -O - http://apache.mirror.iphh.net/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz | sudo tar xz -C /usr/local/
|
||||
|
||||
echo "Install Podman"
|
||||
. /etc/os-release
|
||||
sudo sh -c "echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list"
|
||||
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add -
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get -qq -y install podman
|
||||
|
||||
# get catatonit (to check podman --init switch)
|
||||
cd /tmp
|
||||
wget https://github.com/openSUSE/catatonit/releases/download/v0.1.4/catatonit.x86_64
|
||||
mkdir -p /usr/libexec/podman
|
||||
sudo mv catatonit* /usr/libexec/podman/catatonit
|
||||
sudo chmod +x /usr/libexec/podman/catatonit
|
||||
|
||||
echo "Install podman task driver"
|
||||
# install nomad-podman-driver and move to plugin dir
|
||||
wget -P /tmp https://github.com/pascomnet/nomad-driver-podman/releases/download/v0.0.3/nomad-driver-podman_linux_amd64.tar.gz
|
||||
sudo tar -xf /tmp/nomad-driver-podman_linux_amd64.tar.gz -C /tmp
|
||||
sudo mv /tmp/nomad-driver-podman/nomad-driver-podman $NOMADPLUGINDIR
|
||||
sudo chmod +x $NOMADPLUGINDIR/nomad-driver-podman
|
||||
|
||||
# disable systemd-resolved and configure dnsmasq
|
||||
# to forward local requests to consul
|
||||
sudo systemctl disable systemd-resolved.service
|
||||
sudo rm /etc/resolv.conf
|
||||
echo "nameserver 8.8.8.8" | sudo tee /etc/resolv.conf
|
||||
echo '
|
||||
port=53
|
||||
resolv-file=/var/run/dnsmasq/resolv.conf
|
||||
bind-interfaces
|
||||
listen-address=127.0.0.1
|
||||
server=/consul/127.0.0.1#8600
|
||||
' | sudo tee /etc/dnsmasq.d/default
|
||||
|
||||
# add our hostname to etc/hosts
|
||||
echo "127.0.0.1 $(hostname)" | sudo tee -a /etc/hosts
|
||||
sudo systemctl restart dnsmasq
|
||||
|
||||
# enable cgroup_memory and swap
|
||||
sudo sed -i 's/GRUB_CMDLINE_LINUX="[^"]*/& cgroup_enable=memory swapaccount=1/' /etc/default/grub
|
||||
sudo update-grub
|
||||
|
||||
# note this 'EOF' syntax avoids expansion in the heredoc
|
||||
sudo tee "$HADOOPCONFIGDIR/core-site.xml" << 'EOF'
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
{
|
||||
"type": "amazon-ebs",
|
||||
"region": "us-east-1",
|
||||
"source_ami": "ami-80861296",
|
||||
"source_ami": "ami-7ad76705",
|
||||
"instance_type": "t2.medium",
|
||||
"ssh_username": "ubuntu",
|
||||
"iam_instance_profile": "packer-builder",
|
||||
|
||||
@@ -52,9 +52,42 @@ wget -q -O - \
|
||||
https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz \
|
||||
| sudo tar -C /opt/cni/bin -xz
|
||||
|
||||
# enable varlink socket (not included in ubuntu package)
|
||||
cat > /etc/systemd/system/io.podman.service << EOF
|
||||
[Unit]
|
||||
Description=Podman Remote API Service
|
||||
Requires=io.podman.socket
|
||||
After=io.podman.socket
|
||||
Documentation=man:podman-varlink(1)
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/usr/bin/podman varlink unix:%t/podman/io.podman --timeout=60000
|
||||
TimeoutStopSec=30
|
||||
KillMode=process
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
Also=io.podman.socket
|
||||
EOF
|
||||
|
||||
cat > /etc/systemd/system/io.podman.socket << EOF
|
||||
[Unit]
|
||||
Description=Podman Remote API Socket
|
||||
Documentation=man:podman-varlink(1) https://podman.io/blogs/2019/01/16/podman-varlink.html
|
||||
|
||||
[Socket]
|
||||
ListenStream=%t/podman/io.podman
|
||||
SocketMode=0600
|
||||
|
||||
[Install]
|
||||
WantedBy=sockets.target
|
||||
EOF
|
||||
|
||||
# enable as a systemd service
|
||||
sudo cp "$NOMAD_SRC/nomad.service" /etc/systemd/system/nomad.service
|
||||
|
||||
sudo systemctl enable nomad.service
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl start io.podman
|
||||
sudo systemctl restart nomad.service
|
||||
|
||||
@@ -6,7 +6,7 @@ After=network-online.target
|
||||
[Service]
|
||||
Restart=on-failure
|
||||
Environment=CONSUL_ALLOW_PRIVILEGED_PORTS=true
|
||||
ExecStart=/usr/local/bin/consul agent -config-dir="/etc/consul.d" -dns-port="53" -recursor="172.31.0.2"
|
||||
ExecStart=/usr/local/bin/consul agent -config-dir="/etc/consul.d" -recursor="172.31.0.2"
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
KillSignal=SIGTERM
|
||||
User=root
|
||||
|
||||
@@ -6,7 +6,7 @@ After=network-online.target
|
||||
[Service]
|
||||
Restart=on-failure
|
||||
Environment=CONSUL_ALLOW_PRIVILEGED_PORTS=true
|
||||
ExecStart=/usr/local/bin/consul agent -config-dir="/etc/consul.d" -dns-port="53" -recursor="168.63.129.16"
|
||||
ExecStart=/usr/local/bin/consul agent -config-dir="/etc/consul.d" -recursor="168.63.129.16"
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
KillSignal=SIGTERM
|
||||
User=root
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
plugin_dir = "/opt/nomad/plugins"
|
||||
|
||||
client {
|
||||
enabled = true
|
||||
|
||||
@@ -17,6 +19,14 @@ client {
|
||||
}
|
||||
}
|
||||
|
||||
plugin "nomad-driver-podman" {
|
||||
config {
|
||||
volumes {
|
||||
enabled = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
vault {
|
||||
enabled = true
|
||||
address = "http://active.vault.service.consul:8200"
|
||||
|
||||
@@ -16,6 +16,15 @@ client {
|
||||
}
|
||||
}
|
||||
|
||||
plugin_dir = "/opt/nomad/plugins"
|
||||
plugin "nomad-driver-podman" {
|
||||
config {
|
||||
volumes {
|
||||
enabled = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
vault {
|
||||
enabled = true
|
||||
address = "http://active.vault.service.consul:8200"
|
||||
|
||||
@@ -11,6 +11,15 @@ client {
|
||||
}
|
||||
}
|
||||
|
||||
plugin_dir = "/opt/nomad/plugins"
|
||||
plugin "nomad-driver-podman" {
|
||||
config {
|
||||
volumes {
|
||||
enabled = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
vault {
|
||||
enabled = true
|
||||
address = "http://active.vault.service.consul:8200"
|
||||
|
||||
@@ -13,6 +13,15 @@ client {
|
||||
}
|
||||
}
|
||||
|
||||
plugin_dir = "/opt/nomad/plugins"
|
||||
plugin "nomad-driver-podman" {
|
||||
config {
|
||||
volumes {
|
||||
enabled = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
vault {
|
||||
enabled = true
|
||||
address = "http://active.vault.service.consul:8200"
|
||||
|
||||
@@ -13,12 +13,9 @@ LimitNPROC=infinity
|
||||
TasksMax=infinity
|
||||
Restart=on-failure
|
||||
RestartSec=2
|
||||
|
||||
# systemd>=230 prefer StartLimitIntervalSec,StartLimitBurst in Unit,
|
||||
# however Ubuntu 16.04 only has systemd==229. Use these old style settings
|
||||
# as they will be supported by newer systemds.
|
||||
StartLimitIntervalSec=10
|
||||
StartLimitBurst=3
|
||||
StartLimitInterval=10
|
||||
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
Reference in New Issue
Block a user