Mirror of https://github.com/kemko/nomad.git (synced 2026-01-09 11:55:42 +03:00)
Merge pull request #476 from hashicorp/b-docker-expose
Fixed some issues with expose, port mapping, and environment variables
@@ -2,13 +2,14 @@ package client
 
 import (
     "fmt"
-    consul "github.com/hashicorp/consul/api"
-    "github.com/hashicorp/go-multierror"
-    "github.com/hashicorp/nomad/nomad/structs"
     "log"
     "net/url"
     "sync"
     "time"
+
+    consul "github.com/hashicorp/consul/api"
+    "github.com/hashicorp/go-multierror"
+    "github.com/hashicorp/nomad/nomad/structs"
 )
 
 const (
@@ -95,7 +96,7 @@ func (c *ConsulClient) ShutDown() {
 
 func (c *ConsulClient) findPortAndHostForLabel(portLabel string, task *structs.Task) (string, int) {
     for _, network := range task.Resources.Networks {
-        if p, ok := network.MapLabelToValues()[portLabel]; ok {
+        if p, ok := network.MapLabelToValues(nil)[portLabel]; ok {
             return network.IP, p
         }
     }
@@ -37,12 +37,14 @@ type DockerDriverConfig struct {
     Command string `mapstructure:"command"` // The Command/Entrypoint to run when the container starts up
     Args []string `mapstructure:"args"` // The arguments to the Command/Entrypoint
     NetworkMode string `mapstructure:"network_mode"` // The network mode of the container - host, net and none
-    PortMap []map[string]int `mapstructure:"port_map"` // A map of host port labels and the ports exposed on the container
+    PortMapRaw []map[string]int `mapstructure:"port_map"` //
+    PortMap map[string]int `mapstructure:"-"` // A map of host port labels and the ports exposed on the container
     Privileged bool `mapstructure:"privileged"` // Flag to run the container in privileged mode
     DNSServers []string `mapstructure:"dns_servers"` // DNS Server for containers
     DNSSearchDomains []string `mapstructure:"dns_search_domains"` // DNS Search domains for containers
     Hostname string `mapstructure:"hostname"` // Hostname for containers
-    Labels []map[string]string `mapstructure:"labels"` // Labels to set when the container starts up
+    LabelsRaw []map[string]string `mapstructure:"labels"` //
+    Labels map[string]string `mapstructure:"-"` // Labels to set when the container starts up
     Auth []DockerDriverAuth `mapstructure:"auth"` // Authentication credentials for a private Docker registry
 }
 
@@ -51,13 +53,9 @@ func (c *DockerDriverConfig) Validate() error {
         return fmt.Errorf("Docker Driver needs an image name")
     }
 
-    if len(c.PortMap) > 1 {
-        return fmt.Errorf("Only one port_map block is allowed in the docker driver config")
-    }
+    c.PortMap = mapMergeStrInt(c.PortMapRaw...)
+    c.Labels = mapMergeStrStr(c.LabelsRaw...)
 
-    if len(c.Labels) > 1 {
-        return fmt.Errorf("Only one labels block is allowed in the docker driver config")
-    }
     return nil
 }
 
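The Validate change above leans on the mapMergeStrInt and mapMergeStrStr helpers added further down in this diff: mapstructure hands the driver every port_map and labels block as a slice of maps, and Validate now flattens those slices instead of rejecting more than one block. A minimal, self-contained sketch of the merge semantics (the labels and port numbers here are illustrative, not taken from the commit):

package main

import "fmt"

// mapMergeStrInt mirrors the helper added in this diff: later maps win,
// so repeated port_map blocks collapse into a single label -> port map.
func mapMergeStrInt(maps ...map[string]int) map[string]int {
	out := map[string]int{}
	for _, in := range maps {
		for key, val := range in {
			out[key] = val
		}
	}
	return out
}

func main() {
	// Shaped like DockerDriverConfig.PortMapRaw after mapstructure decoding.
	portMapRaw := []map[string]int{
		{"main": 8080},
		{"REDIS": 6379},
	}
	fmt.Println(mapMergeStrInt(portMapRaw...)) // map[REDIS:6379 main:8080]
}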
@@ -239,7 +237,7 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
     // Setup port mapping and exposed ports
     if len(task.Resources.Networks) == 0 {
         d.logger.Println("[DEBUG] driver.docker: No network interfaces are available")
-        if len(driverConfig.PortMap) == 1 && len(driverConfig.PortMap[0]) > 0 {
+        if len(driverConfig.PortMap) > 0 {
             return c, fmt.Errorf("Trying to map ports but no network interface is available")
         }
     } else {
@@ -249,8 +247,16 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
         exposedPorts := map[docker.Port]struct{}{}
 
         for _, port := range network.ReservedPorts {
+            // By default we will map the allocated port 1:1 to the container
+            containerPortInt := port.Value
+
+            // If the user has mapped a port using port_map we'll change it here
+            if mapped, ok := driverConfig.PortMap[port.Label]; ok {
+                containerPortInt = mapped
+            }
+
             hostPortStr := strconv.Itoa(port.Value)
-            containerPort := docker.Port(hostPortStr)
+            containerPort := docker.Port(strconv.Itoa(containerPortInt))
 
             publishedPorts[containerPort+"/tcp"] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPortStr}}
             publishedPorts[containerPort+"/udp"] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPortStr}}
@@ -261,21 +267,16 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
             d.logger.Printf("[DEBUG] driver.docker: exposed port %d", port.Value)
         }
 
-        containerToHostPortMap := make(map[string]int)
         for _, port := range network.DynamicPorts {
             // By default we will map the allocated port 1:1 to the container
             containerPortInt := port.Value
 
             // If the user has mapped a port using port_map we'll change it here
-            if len(driverConfig.PortMap) == 1 {
-                mapped, ok := driverConfig.PortMap[0][port.Label]
-                if ok {
-                    containerPortInt = mapped
-                }
+            if mapped, ok := driverConfig.PortMap[port.Label]; ok {
+                containerPortInt = mapped
             }
 
             hostPortStr := strconv.Itoa(port.Value)
-            // containerPort := docker.Port(hostPortStr)
             containerPort := docker.Port(strconv.Itoa(containerPortInt))
 
             publishedPorts[containerPort+"/tcp"] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: hostPortStr}}
@@ -284,12 +285,18 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
 
             exposedPorts[containerPort+"/tcp"] = struct{}{}
             exposedPorts[containerPort+"/udp"] = struct{}{}
-            d.logger.Printf("[DEBUG] driver.docker: exposed port %s", hostPortStr)
-
-            containerToHostPortMap[string(containerPort)] = port.Value
+            d.logger.Printf("[DEBUG] driver.docker: exposed port %s", containerPort)
         }
 
-        env.SetPorts(containerToHostPortMap)
+        // This was set above in a call to TaskEnvironmentVariables but if we
+        // have mapped any ports we will need to override them.
+        //
+        // TODO refactor the implementation in TaskEnvironmentVariables to match
+        // the 0.2 ports world view. Docker seems to be the only place where
+        // this is actually needed, but this is kinda hacky.
+        if len(driverConfig.PortMap) > 0 {
+            env.SetPorts(network.MapLabelToValues(driverConfig.PortMap))
+        }
         hostConfig.PortBindings = publishedPorts
         config.ExposedPorts = exposedPorts
     }
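The net effect of the two port loops above: the host side of each binding keeps the port Nomad allocated, while the container side switches to the port_map value when one exists. A small standalone sketch of that calculation, using the "main" label from dockerTask() and the mapping exercised by TestDockerPortsMapping later in this diff:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Allocated host port for the "main" label (as in dockerTask()) and the
	// port_map entry used by TestDockerPortsMapping.
	label, allocated := "main", 11110
	portMap := map[string]int{"main": 8080}

	// By default the container port is the allocated port 1:1...
	containerPortInt := allocated
	// ...unless the user mapped the label with port_map.
	if mapped, ok := portMap[label]; ok {
		containerPortInt = mapped
	}

	hostPortStr := strconv.Itoa(allocated)
	containerPort := strconv.Itoa(containerPortInt)
	fmt.Printf("%s/tcp -> 127.0.0.1:%s\n", containerPort, hostPortStr) // 8080/tcp -> 127.0.0.1:11110
}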
@@ -309,8 +316,8 @@ func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, dri
         d.logger.Println("[DEBUG] driver.docker: ignoring command arguments because command is not specified")
     }
 
-    if len(driverConfig.Labels) == 1 {
-        config.Labels = driverConfig.Labels[0]
+    if len(driverConfig.Labels) > 0 {
+        config.Labels = driverConfig.Labels
         d.logger.Printf("[DEBUG] driver.docker: applied labels on the container: %+v", config.Labels)
     }
 
@@ -5,6 +5,7 @@ import (
     "io/ioutil"
     "path/filepath"
+    "reflect"
     "runtime/debug"
     "testing"
     "time"
 
@@ -54,6 +55,69 @@ func dockerIsRemote(t *testing.T) bool {
     return false
 }
 
+func dockerTask() *structs.Task {
+    return &structs.Task{
+        Name: "redis-demo",
+        Config: map[string]interface{}{
+            "image": "redis",
+        },
+        Resources: &structs.Resources{
+            MemoryMB: 256,
+            CPU:      512,
+            Networks: []*structs.NetworkResource{
+                &structs.NetworkResource{
+                    IP:            "127.0.0.1",
+                    ReservedPorts: []structs.Port{{"main", 11110}},
+                    DynamicPorts:  []structs.Port{{"REDIS", 43330}},
+                },
+            },
+        },
+    }
+}
+
+// dockerSetup does all of the basic setup you need to get a running docker
+// process up and running for testing. Use like:
+//
+//   task := taskTemplate()
+//   // do custom task configuration
+//   client, handle, cleanup := dockerSetup(t, task)
+//   defer cleanup()
+//   // do test stuff
+//
+// If there is a problem during setup this function will abort or skip the test
+// and indicate the reason.
+func dockerSetup(t *testing.T, task *structs.Task) (*docker.Client, DriverHandle, func()) {
+    if !dockerIsConnected(t) {
+        t.SkipNow()
+    }
+
+    client, err := docker.NewClientFromEnv()
+    if err != nil {
+        t.Fatalf("Failed to initialize client: %s\nStack\n%s", err, debug.Stack())
+    }
+
+    driverCtx := testDockerDriverContext(task.Name)
+    ctx := testDriverExecContext(task, driverCtx)
+    driver := NewDockerDriver(driverCtx)
+
+    handle, err := driver.Start(ctx, task)
+    if err != nil {
+        ctx.AllocDir.Destroy()
+        t.Fatalf("Failed to start driver: %s\nStack\n%s", err, debug.Stack())
+    }
+    if handle == nil {
+        ctx.AllocDir.Destroy()
+        t.Fatalf("handle is nil\nStack\n%s", debug.Stack())
+    }
+
+    cleanup := func() {
+        handle.Kill()
+        ctx.AllocDir.Destroy()
+    }
+
+    return client, handle, cleanup
+}
+
 func TestDockerDriver_Handle(t *testing.T) {
     h := &DockerHandle{
         imageID: "imageid",
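Note that the usage example in the doc comment still refers to taskTemplate(); that helper is replaced by dockerTask() later in this diff. A sketch of what a new test looks like when written against these helpers, assuming it lives in the same test file (the test name and the final assertion are placeholders, not part of the commit):

// Illustrative only: a test skeleton using dockerTask and dockerSetup, in the
// same shape as the port and label tests added further down.
func TestDockerDriver_Example(t *testing.T) {
	task := dockerTask()
	// ...adjust task.Config here...

	client, handle, cleanup := dockerSetup(t, task)
	defer cleanup()

	container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID())
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	_ = container // assert on whatever the test cares about
}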
@@ -126,10 +190,6 @@ func TestDockerDriver_StartOpen_Wait(t *testing.T) {
 }
 
 func TestDockerDriver_Start_Wait(t *testing.T) {
-    if !dockerIsConnected(t) {
-        t.SkipNow()
-    }
-
     task := &structs.Task{
         Name: "redis-demo",
         Config: map[string]interface{}{
@@ -143,22 +203,11 @@ func TestDockerDriver_Start_Wait(t *testing.T) {
         },
     }
 
-    driverCtx := testDockerDriverContext(task.Name)
-    ctx := testDriverExecContext(task, driverCtx)
-    defer ctx.AllocDir.Destroy()
-    d := NewDockerDriver(driverCtx)
-
-    handle, err := d.Start(ctx, task)
-    if err != nil {
-        t.Fatalf("err: %v", err)
-    }
-    if handle == nil {
-        t.Fatalf("missing handle")
-    }
-    defer handle.Kill()
+    _, handle, cleanup := dockerSetup(t, task)
+    defer cleanup()
 
     // Update should be a no-op
-    err = handle.Update(task)
+    err := handle.Update(task)
     if err != nil {
         t.Fatalf("err: %v", err)
     }
@@ -236,10 +285,6 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
 }
 
 func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
-    if !dockerIsConnected(t) {
-        t.SkipNow()
-    }
-
     task := &structs.Task{
         Name: "redis-demo",
         Config: map[string]interface{}{
@@ -250,19 +295,8 @@ func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
         Resources: basicResources,
     }
 
-    driverCtx := testDockerDriverContext(task.Name)
-    ctx := testDriverExecContext(task, driverCtx)
-    defer ctx.AllocDir.Destroy()
-    d := NewDockerDriver(driverCtx)
-
-    handle, err := d.Start(ctx, task)
-    if err != nil {
-        t.Fatalf("err: %v", err)
-    }
-    if handle == nil {
-        t.Fatalf("missing handle")
-    }
-    defer handle.Kill()
+    _, handle, cleanup := dockerSetup(t, task)
+    defer cleanup()
 
     go func() {
         time.Sleep(100 * time.Millisecond)
@@ -282,40 +316,20 @@
     }
 }
 
-func taskTemplate() *structs.Task {
-    return &structs.Task{
-        Name: "redis-demo",
-        Config: map[string]interface{}{
-            "image": "redis",
-        },
-        Resources: &structs.Resources{
-            MemoryMB: 256,
-            CPU:      512,
-            Networks: []*structs.NetworkResource{
-                &structs.NetworkResource{
-                    IP:            "127.0.0.1",
-                    ReservedPorts: []structs.Port{{"main", 11110}},
-                    DynamicPorts:  []structs.Port{{"REDIS", 43330}},
-                },
-            },
-        },
-    }
-}
-
 func TestDocker_StartN(t *testing.T) {
     if !dockerIsConnected(t) {
         t.SkipNow()
     }
 
-    task1 := taskTemplate()
+    task1 := dockerTask()
     task1.Resources.Networks[0].ReservedPorts[0] = structs.Port{"main", 11110}
     task1.Resources.Networks[0].DynamicPorts[0] = structs.Port{"REDIS", 43331}
 
-    task2 := taskTemplate()
+    task2 := dockerTask()
     task2.Resources.Networks[0].ReservedPorts[0] = structs.Port{"main", 22222}
     task2.Resources.Networks[0].DynamicPorts[0] = structs.Port{"REDIS", 43332}
 
-    task3 := taskTemplate()
+    task3 := dockerTask()
     task3.Resources.Networks[0].ReservedPorts[0] = structs.Port{"main", 33333}
     task3.Resources.Networks[0].DynamicPorts[0] = structs.Port{"REDIS", 43333}
 
@@ -361,17 +375,17 @@ func TestDocker_StartNVersions(t *testing.T) {
         t.SkipNow()
     }
 
-    task1 := taskTemplate()
+    task1 := dockerTask()
     task1.Config["image"] = "redis"
     task1.Resources.Networks[0].ReservedPorts[0] = structs.Port{"main", 11110}
     task1.Resources.Networks[0].DynamicPorts[0] = structs.Port{"REDIS", 43331}
 
-    task2 := taskTemplate()
+    task2 := dockerTask()
     task2.Config["image"] = "redis:latest"
     task2.Resources.Networks[0].ReservedPorts[0] = structs.Port{"main", 22222}
     task2.Resources.Networks[0].DynamicPorts[0] = structs.Port{"REDIS", 43332}
 
-    task3 := taskTemplate()
+    task3 := dockerTask()
     task3.Config["image"] = "redis:3.0"
     task3.Resources.Networks[0].ReservedPorts[0] = structs.Port{"main", 33333}
     task3.Resources.Networks[0].DynamicPorts[0] = structs.Port{"REDIS", 43333}
@@ -414,42 +428,36 @@
 }
 
 func TestDockerHostNet(t *testing.T) {
-    if !dockerIsConnected(t) {
-        t.SkipNow()
-    }
+    expected := "host"
 
     task := &structs.Task{
         Name: "redis-demo",
         Config: map[string]interface{}{
             "image": "redis",
-            "network_mode": "host",
+            "network_mode": expected,
         },
         Resources: &structs.Resources{
             MemoryMB: 256,
             CPU:      512,
         },
     }
-    driverCtx := testDockerDriverContext(task.Name)
-    ctx := testDriverExecContext(task, driverCtx)
-    defer ctx.AllocDir.Destroy()
-    d := NewDockerDriver(driverCtx)
-
-    handle, err := d.Start(ctx, task)
+    client, handle, cleanup := dockerSetup(t, task)
+    defer cleanup()
+
+    container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID())
     if err != nil {
         t.Fatalf("err: %v", err)
     }
-    if handle == nil {
-        t.Fatalf("missing handle")
+
+    actual := container.HostConfig.NetworkMode
+    if actual != expected {
+        t.Errorf("DNS Network mode doesn't match.\nExpected:\n%s\nGot:\n%s\n", expected, actual)
     }
-    defer handle.Kill()
 }
 
 func TestDockerLabels(t *testing.T) {
-    if !dockerIsConnected(t) {
-        t.SkipNow()
-    }
 
-    task := taskTemplate()
+    task := dockerTask()
     task.Config["labels"] = []map[string]string{
         map[string]string{
             "label1": "value1",
@@ -457,24 +465,8 @@ func TestDockerLabels(t *testing.T) {
         },
     }
 
-    driverCtx := testDockerDriverContext(task.Name)
-    ctx := testDriverExecContext(task, driverCtx)
-    defer ctx.AllocDir.Destroy()
-    d := NewDockerDriver(driverCtx)
-
-    handle, err := d.Start(ctx, task)
-    if err != nil {
-        t.Fatalf("err: %v", err)
-    }
-    if handle == nil {
-        t.Fatalf("missing handle")
-    }
-    defer handle.Kill()
-
-    client, err := docker.NewClientFromEnv()
-    if err != nil {
-        t.Fatalf("err: %v", err)
-    }
+    client, handle, cleanup := dockerSetup(t, task)
+    defer cleanup()
 
     container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID())
     if err != nil {
@@ -491,34 +483,13 @@ func TestDockerLabels(t *testing.T) {
 }
 
 func TestDockerDNS(t *testing.T) {
-    if !dockerIsConnected(t) {
-        t.SkipNow()
-    }
-
-    task := taskTemplate()
+    task := dockerTask()
     task.Config["dns_servers"] = []string{"8.8.8.8", "8.8.4.4"}
     task.Config["dns_search_domains"] = []string{"example.com", "example.org", "example.net"}
 
-    driverCtx := testDockerDriverContext(task.Name)
-    ctx := testDriverExecContext(task, driverCtx)
-    defer ctx.AllocDir.Destroy()
-    d := NewDockerDriver(driverCtx)
+    client, handle, cleanup := dockerSetup(t, task)
+    defer cleanup()
 
-    handle, err := d.Start(ctx, task)
-    if err != nil {
-        t.Fatalf("err: %v", err)
-    }
-    if handle == nil {
-        t.Fatalf("missing handle")
-    }
-    defer handle.Kill()
-
-    client, err := docker.NewClientFromEnv()
-    if err != nil {
-        t.Fatalf("err: %v", err)
-    }
-
-    // don't know if is queriable in a clean way
     container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID())
     if err != nil {
         t.Fatalf("err: %v", err)
@@ -532,3 +503,116 @@ func TestDockerDNS(t *testing.T) {
         t.Errorf("DNS Servers don't match.\nExpected:\n%s\nGot:\n%s\n", task.Config["dns_search_domains"], container.HostConfig.DNSSearch)
     }
 }
+
+func inSlice(needle string, haystack []string) bool {
+    for _, h := range haystack {
+        if h == needle {
+            return true
+        }
+    }
+    return false
+}
+
+func TestDockerPortsNoMap(t *testing.T) {
+    task := dockerTask()
+
+    client, handle, cleanup := dockerSetup(t, task)
+    defer cleanup()
+
+    container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID())
+    if err != nil {
+        t.Fatalf("err: %v", err)
+    }
+
+    // Verify that the correct ports are EXPOSED
+    expectedExposedPorts := map[docker.Port]struct{}{
+        docker.Port("11110/tcp"): struct{}{},
+        docker.Port("11110/udp"): struct{}{},
+        docker.Port("43330/tcp"): struct{}{},
+        docker.Port("43330/udp"): struct{}{},
+        // This one comes from the redis container
+        docker.Port("6379/tcp"): struct{}{},
+    }
+
+    if !reflect.DeepEqual(container.Config.ExposedPorts, expectedExposedPorts) {
+        t.Errorf("Exposed ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedExposedPorts, container.Config.ExposedPorts)
+    }
+
+    // Verify that the correct ports are FORWARDED
+    expectedPortBindings := map[docker.Port][]docker.PortBinding{
+        docker.Port("11110/tcp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: "11110"}},
+        docker.Port("11110/udp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: "11110"}},
+        docker.Port("43330/tcp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: "43330"}},
+        docker.Port("43330/udp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: "43330"}},
+    }
+
+    if !reflect.DeepEqual(container.HostConfig.PortBindings, expectedPortBindings) {
+        t.Errorf("Forwarded ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedPortBindings, container.HostConfig.PortBindings)
+    }
+
+    expectedEnvironment := map[string]string{
+        "NOMAD_PORT_main":  "11110",
+        "NOMAD_PORT_REDIS": "43330",
+    }
+
+    for key, val := range expectedEnvironment {
+        search := fmt.Sprintf("%s=%s", key, val)
+        if !inSlice(search, container.Config.Env) {
+            t.Errorf("Expected to find %s in container environment: %+v", search, container.Config.Env)
+        }
+    }
+}
+
+func TestDockerPortsMapping(t *testing.T) {
+    task := dockerTask()
+    task.Config["port_map"] = []map[string]string{
+        map[string]string{
+            "main":  "8080",
+            "REDIS": "6379",
+        },
+    }
+
+    client, handle, cleanup := dockerSetup(t, task)
+    defer cleanup()
+
+    container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID())
+    if err != nil {
+        t.Fatalf("err: %v", err)
+    }
+
+    // Verify that the correct ports are EXPOSED
+    expectedExposedPorts := map[docker.Port]struct{}{
+        docker.Port("8080/tcp"): struct{}{},
+        docker.Port("8080/udp"): struct{}{},
+        docker.Port("6379/tcp"): struct{}{},
+        docker.Port("6379/udp"): struct{}{},
+    }
+
+    if !reflect.DeepEqual(container.Config.ExposedPorts, expectedExposedPorts) {
+        t.Errorf("Exposed ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedExposedPorts, container.Config.ExposedPorts)
+    }
+
+    // Verify that the correct ports are FORWARDED
+    expectedPortBindings := map[docker.Port][]docker.PortBinding{
+        docker.Port("8080/tcp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: "11110"}},
+        docker.Port("8080/udp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: "11110"}},
+        docker.Port("6379/tcp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: "43330"}},
+        docker.Port("6379/udp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: "43330"}},
+    }
+
+    if !reflect.DeepEqual(container.HostConfig.PortBindings, expectedPortBindings) {
+        t.Errorf("Forwarded ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedPortBindings, container.HostConfig.PortBindings)
+    }
+
+    expectedEnvironment := map[string]string{
+        "NOMAD_PORT_main":  "8080",
+        "NOMAD_PORT_REDIS": "6379",
+    }
+
+    for key, val := range expectedEnvironment {
+        search := fmt.Sprintf("%s=%s", key, val)
+        if !inSlice(search, container.Config.Env) {
+            t.Errorf("Expected to find %s in container environment: %+v", search, container.Config.Env)
+        }
+    }
+}
@@ -135,7 +135,7 @@ func TaskEnvironmentVariables(ctx *ExecContext, task *structs.Task) environment.
         if len(task.Resources.Networks) > 0 {
             network := task.Resources.Networks[0]
             env.SetTaskIp(network.IP)
-            env.SetPorts(network.MapLabelToValues())
+            env.SetPorts(network.MapLabelToValues(nil))
         }
     }
 
@@ -145,3 +145,23 @@
 
     return env
 }
+
+func mapMergeStrInt(maps ...map[string]int) map[string]int {
+    out := map[string]int{}
+    for _, in := range maps {
+        for key, val := range in {
+            out[key] = val
+        }
+    }
+    return out
+}
+
+func mapMergeStrStr(maps ...map[string]string) map[string]string {
+    out := map[string]string{}
+    for _, in := range maps {
+        for key, val := range in {
+            out[key] = val
+        }
+    }
+    return out
+}
@@ -99,3 +99,51 @@ func TestDriver_TaskEnvironmentVariables(t *testing.T) {
         t.Fatalf("TaskEnvironmentVariables(%#v, %#v) returned %#v; want %#v", ctx, task, act, exp)
     }
 }
+
+func TestMapMergeStrInt(t *testing.T) {
+    a := map[string]int{
+        "cakes":   5,
+        "cookies": 3,
+    }
+
+    b := map[string]int{
+        "cakes": 3,
+        "pies":  2,
+    }
+
+    c := mapMergeStrInt(a, b)
+
+    d := map[string]int{
+        "cakes":   3,
+        "cookies": 3,
+        "pies":    2,
+    }
+
+    if !reflect.DeepEqual(c, d) {
+        t.Errorf("\nExpected\n%+v\nGot\n%+v\n", d, c)
+    }
+}
+
+func TestMapMergeStrStr(t *testing.T) {
+    a := map[string]string{
+        "cake":   "chocolate",
+        "cookie": "caramel",
+    }
+
+    b := map[string]string{
+        "cake": "strawberry",
+        "pie":  "apple",
+    }
+
+    c := mapMergeStrStr(a, b)
+
+    d := map[string]string{
+        "cake":   "strawberry",
+        "cookie": "caramel",
+        "pie":    "apple",
+    }
+
+    if !reflect.DeepEqual(c, d) {
+        t.Errorf("\nExpected\n%+v\nGot\n%+v\n", d, c)
+    }
+}
@@ -148,7 +148,7 @@ func (d *QemuDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle,
     // reserved ports to the ports listening in the VM
     // Ex: hostfwd=tcp::22000-:22,hostfwd=tcp::80-:8080
     var forwarding []string
-    taskPorts := task.Resources.Networks[0].MapLabelToValues()
+    taskPorts := task.Resources.Networks[0].MapLabelToValues(nil)
    for label, guest := range driverConfig.PortMap[0] {
         host, ok := taskPorts[label]
         if !ok {
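The comment above gives the target format for qemu user-mode forwarding (hostfwd=tcp::HOST-:GUEST, comma separated). A rough sketch of how a forwarding list is assembled from the allocated host ports and the driver's port_map; the labels here are invented and the exact formatting in the qemu driver may differ:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// What MapLabelToValues(nil) returns: label -> allocated host port.
	taskPorts := map[string]int{"ssh": 22000, "web": 80}
	// Guest ports from the qemu port_map block (labels are made up).
	portMap := map[string]int{"ssh": 22, "web": 8080}

	var forwarding []string
	for label, guest := range portMap {
		host, ok := taskPorts[label]
		if !ok {
			continue // no allocated port for this label
		}
		forwarding = append(forwarding, fmt.Sprintf("hostfwd=tcp::%d-:%d", host, guest))
	}

	// Map iteration order is random, so the two entries may swap places.
	fmt.Println(strings.Join(forwarding, ",")) // e.g. hostfwd=tcp::22000-:22,hostfwd=tcp::80-:8080
}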
@@ -662,11 +662,15 @@ func (n *NetworkResource) GoString() string {
     return fmt.Sprintf("*%#v", *n)
 }
 
-func (n *NetworkResource) MapLabelToValues() map[string]int {
+func (n *NetworkResource) MapLabelToValues(port_map map[string]int) map[string]int {
     labelValues := make(map[string]int)
     ports := append(n.ReservedPorts, n.DynamicPorts...)
     for _, port := range ports {
-        labelValues[port.Label] = port.Value
+        if mapping, ok := port_map[port.Label]; ok {
+            labelValues[port.Label] = mapping
+        } else {
+            labelValues[port.Label] = port.Value
+        }
     }
     return labelValues
 }
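MapLabelToValues now takes an optional mapping: indexing a nil map in Go is legal and always misses, so existing callers can pass nil unchanged, while the docker driver passes its merged port_map to get container-side values. A sketch of both calls against the structs package as of this commit (the mapped ports mirror TestDockerPortsMapping):

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/nomad/structs"
)

func main() {
	n := &structs.NetworkResource{
		IP:            "127.0.0.1",
		ReservedPorts: []structs.Port{{"main", 11110}},
		DynamicPorts:  []structs.Port{{"REDIS", 43330}},
	}

	// No mapping: every label resolves to the allocated host port.
	fmt.Println(n.MapLabelToValues(nil)) // map[REDIS:43330 main:11110]

	// With a port_map the mapped labels resolve to the container-side ports,
	// which is what the docker driver now feeds into env.SetPorts.
	fmt.Println(n.MapLabelToValues(map[string]int{"main": 8080, "REDIS": 6379})) // map[REDIS:6379 main:8080]
}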