client: expose network namespace CNI config as task env vars. (#11810)

This change exposes the CNI configuration details of a network
namespace as environment variables. This allows a task to use
these values to configure itself; a potential use case is to run
a Raft application that binds to the IP and port configured using
the bridge network mode.
This commit is contained in:
James Rasell
2024-05-14 10:02:06 +02:00
committed by GitHub
parent 169818b1bd
commit 04ba358266
8 changed files with 181 additions and 1 deletions

3
.changelog/11810.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:improvement
client: expose network namespace bridge/cni configuration values as task env vars
```

View File

@@ -928,6 +928,12 @@ func (ar *allocRunner) SetNetworkStatus(s *structs.AllocNetworkStatus) {
ans := s.Copy()
ar.state.NetworkStatus = ans
ar.hookResources.SetAllocNetworkStatus(ans)
// Iterate each task runner and add the status information. This allows the
// task to build the environment variables with this information available.
for _, tr := range ar.tasks {
tr.SetNetworkStatus(ans)
}
}
func (ar *allocRunner) NetworkStatus() *structs.AllocNetworkStatus {

View File

@@ -258,6 +258,12 @@ type TaskRunner struct {
networkIsolationLock sync.Mutex
networkIsolationSpec *drivers.NetworkIsolationSpec
// allocNetworkStatus is provided from the allocrunner and allows us to
// include this information as env vars for the task. When manipulating
// this the allocNetworkStatusLock should be used.
allocNetworkStatusLock sync.Mutex
allocNetworkStatus *structs.AllocNetworkStatus
// serviceRegWrapper is the handler wrapper that is used by service hooks
// to perform service and check registration and deregistration.
serviceRegWrapper *wrapper.HandlerWrapper
@@ -1456,6 +1462,19 @@ func (tr *TaskRunner) SetNetworkIsolation(n *drivers.NetworkIsolationSpec) {
tr.networkIsolationLock.Unlock()
}
// SetNetworkStatus is called from the allocrunner to propagate the
// network status of an allocation. This call occurs once the network hook has
// run and allows this information to be exported as env vars within the
// taskenv.
func (tr *TaskRunner) SetNetworkStatus(s *structs.AllocNetworkStatus) {
	// Store the status under the dedicated lock; the envBuilder update below
	// is intentionally performed after the lock is released.
	tr.allocNetworkStatusLock.Lock()
	tr.allocNetworkStatus = s
	tr.allocNetworkStatusLock.Unlock()

	// Update the taskenv builder.
	// NOTE(review): tr.envBuilder is reassigned here without holding
	// allocNetworkStatusLock — confirm callers serialize SetNetworkStatus
	// with other envBuilder mutations.
	tr.envBuilder = tr.envBuilder.SetNetworkStatus(s)
}
// triggerUpdate if there isn't already an update pending. Should be called
// instead of calling updateHooks directly to serialize runs of update hooks.
// TaskRunner state should be updated prior to triggering update hooks.

View File

@@ -120,6 +120,11 @@ const (
// UpstreamPrefix is the prefix for passing upstream IP and ports to the alloc
UpstreamPrefix = "NOMAD_UPSTREAM_"
// AllocPrefix is a general purpose alloc prefix. It is currently used as
// the env var prefix used to export network namespace information
// including IP, Port, and interface.
AllocPrefix = "NOMAD_ALLOC_"
// VaultToken is the environment variable for passing the Vault token
VaultToken = "VAULT_TOKEN"
@@ -446,6 +451,9 @@ type Builder struct {
// and affect network env vars.
networks []*structs.NetworkResource
networkStatus *structs.AllocNetworkStatus
allocatedPorts structs.AllocatedPorts
// hookEnvs are env vars set by hooks and stored by hook name to
// support adding/removing vars from multiple hooks (eg HookA adds A:1,
// HookB adds A:2, HookA removes A, A should equal 2)
@@ -565,6 +573,12 @@ func (b *Builder) buildEnv(allocDir, localDir, secretsDir string,
// Build the Consul Connect upstream env vars
buildUpstreamsEnv(envMap, b.upstreams)
// Build the network namespace information if we have the required detail
// available.
if b.networkStatus != nil && b.allocatedPorts != nil {
addNomadAllocNetwork(envMap, b.allocatedPorts, b.networkStatus)
}
// Build the Vault Token
if b.injectVaultToken && b.vaultToken != "" {
envMap[VaultToken] = b.vaultToken
@@ -816,6 +830,7 @@ func (b *Builder) setAlloc(alloc *structs.Allocation) *Builder {
// Add any allocated host ports
if alloc.AllocatedResources.Shared.Ports != nil {
b.allocatedPorts = alloc.AllocatedResources.Shared.Ports
addPorts(b.otherPorts, alloc.AllocatedResources.Shared.Ports)
}
}
@@ -960,6 +975,13 @@ func (b *Builder) setUpstreamsLocked(upstreams []structs.ConsulUpstream) *Builde
return b
}
// SetNetworkStatus stores the allocation network status on the builder so a
// subsequent build can export it as task environment variables. It returns
// the builder to allow call chaining.
func (b *Builder) SetNetworkStatus(netStatus *structs.AllocNetworkStatus) *Builder {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.networkStatus = netStatus
	return b
}
// buildUpstreamsEnv builds NOMAD_UPSTREAM_{IP,PORT,ADDR}_{destination} vars
func buildUpstreamsEnv(envMap map[string]string, upstreams []structs.ConsulUpstream) {
// Proxy sidecars always bind to localhost
@@ -978,6 +1000,18 @@ func buildUpstreamsEnv(envMap map[string]string, upstreams []structs.ConsulUpstr
}
}
// addNomadAllocNetwork builds NOMAD_ALLOC_{IP,INTERFACE,ADDR}_{port_label}
// vars. NOMAD_ALLOC_PORT_* is handled within addPorts and therefore omitted
// from this function.
func addNomadAllocNetwork(envMap map[string]string, p structs.AllocatedPorts, netStatus *structs.AllocNetworkStatus) {
	for _, port := range p {
		// Every variable shares the label suffix and is drawn from the
		// namespace-level network status.
		label := port.Label
		envMap[AllocPrefix+"INTERFACE_"+label] = netStatus.InterfaceName
		envMap[AllocPrefix+"IP_"+label] = netStatus.Address
		envMap[AllocPrefix+"ADDR_"+label] = net.JoinHostPort(netStatus.Address, strconv.Itoa(port.To))
	}
}
// SetPortMapEnvs sets the PortMap related environment variables on the map
func SetPortMapEnvs(envs map[string]string, ports map[string]int) map[string]string {
if envs == nil {

View File

@@ -339,6 +339,13 @@ func TestEnvironment_AllValues(t *testing.T) {
&drivers.DriverNetwork{PortMap: map[string]int{"https": 443}},
)
// Setting the network status ensures we trigger the addNomadAllocNetwork
// for the test.
env = env.SetNetworkStatus(&structs.AllocNetworkStatus{
InterfaceName: "eth0",
Address: "172.26.64.19",
})
// Add a host environment variable which matches a task variable. It means
// we can test to ensure the allocation ID variable from the task overrides
// that found on the host. The second entry tests to ensure other host env
@@ -438,6 +445,9 @@ func TestEnvironment_AllValues(t *testing.T) {
"NOMAD_PORT_admin": "9000",
"NOMAD_ALLOC_PORT_admin": "9000",
"NOMAD_HOST_PORT_admin": "32000",
"NOMAD_ALLOC_INTERFACE_admin": "eth0",
"NOMAD_ALLOC_IP_admin": "172.26.64.19",
"NOMAD_ALLOC_ADDR_admin": "172.26.64.19:9000",
// Env vars from the host.
"LC_CTYPE": "C.UTF-8",
@@ -814,6 +824,58 @@ func TestEnvironment_Upstreams(t *testing.T) {
require.Equal(t, "1234", env["bar"])
}
func Test_addNetNamespacePort(t *testing.T) {
testCases := []struct {
inputPorts structs.AllocatedPorts
inputNetwork *structs.AllocNetworkStatus
expectedOutput map[string]string
name string
}{
{
inputPorts: structs.AllocatedPorts{
{Label: "http", To: 80},
},
inputNetwork: &structs.AllocNetworkStatus{
InterfaceName: "eth0",
Address: "172.26.64.11",
},
expectedOutput: map[string]string{
"NOMAD_ALLOC_INTERFACE_http": "eth0",
"NOMAD_ALLOC_IP_http": "172.26.64.11",
"NOMAD_ALLOC_ADDR_http": "172.26.64.11:80",
},
name: "single input port",
},
{
inputPorts: structs.AllocatedPorts{
{Label: "http", To: 80},
{Label: "https", To: 443},
},
inputNetwork: &structs.AllocNetworkStatus{
InterfaceName: "eth0",
Address: "172.26.64.11",
},
expectedOutput: map[string]string{
"NOMAD_ALLOC_INTERFACE_http": "eth0",
"NOMAD_ALLOC_IP_http": "172.26.64.11",
"NOMAD_ALLOC_ADDR_http": "172.26.64.11:80",
"NOMAD_ALLOC_INTERFACE_https": "eth0",
"NOMAD_ALLOC_IP_https": "172.26.64.11",
"NOMAD_ALLOC_ADDR_https": "172.26.64.11:443",
},
name: "multiple input ports",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
inputMap := make(map[string]string)
addNomadAllocNetwork(inputMap, tc.inputPorts, tc.inputNetwork)
assert.Equal(t, tc.expectedOutput, inputMap, tc.name)
})
}
}
func TestEnvironment_SetPortMapEnvs(t *testing.T) {
ci.Parallel(t)

View File

@@ -0,0 +1,29 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
# A minimal bridge-mode job used by the networking e2e suite to verify that
# network namespace (CNI) details are exported as task environment variables.
job "networking" {
  datacenters = ["dc1", "dc2"]

  # Bridge networking requires a Linux client.
  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  group "bridged" {
    network {
      mode = "bridge"

      # The "dummy" label drives the NOMAD_ALLOC_*_dummy env vars; the port
      # maps to 13130 inside the network namespace.
      port "dummy" {
        to = 13130
      }
    }

    # Long-running no-op task so the e2e test can exec into the allocation.
    task "sleep" {
      driver = "docker"

      config {
        image   = "busybox:1"
        command = "/bin/sleep"
        args    = ["300"]
      }
    }
  }
}

View File

@@ -95,3 +95,27 @@ func (tc *NetworkingE2ETest) TestNetworking_DockerBridgedHostnameInterpolation(f
f.NoError(err, "failed to run hostname exec command")
f.Contains(hostsOutput, "mylittlepony-0", "/etc/hosts doesn't contain hostname entry")
}
// TestNetworking_DockerBridgedCNIEnvVars runs a bridge-networked docker job
// and checks its task environment contains the network namespace env vars.
func (tc *NetworkingE2ETest) TestNetworking_DockerBridgedCNIEnvVars(f *framework.F) {
	bridgeJobID := "test-networking-" + uuid.Generate()[0:8]
	f.NoError(e2eutil.Register(bridgeJobID, "networking/inputs/docker_bridged_basic.nomad"))
	tc.jobIDs = append(tc.jobIDs, bridgeJobID)

	f.NoError(e2eutil.WaitForAllocStatusExpected(bridgeJobID, "default", []string{"running"}),
		"job should be running with 1 alloc")

	// Look up the single allocation the job should have placed.
	allocList, _, err := tc.Nomad().Jobs().Allocations(bridgeJobID, false, nil)
	f.NoError(err, "failed to get allocs for job")
	f.Len(allocList, 1, "job should have one alloc")

	// Exec env inside the task so its environment can be inspected.
	taskEnv, err := e2eutil.AllocExec(allocList[0].ID, "sleep", "env", "default", nil)
	f.NoError(err, "failed to run env exec command")

	// Every network namespace env var must be present in the output.
	f.Contains(taskEnv, "NOMAD_ALLOC_INTERFACE_dummy", "namespace interface env var not found")
	f.Contains(taskEnv, "NOMAD_ALLOC_IP_dummy", "namespace ip env var not found")
	f.Contains(taskEnv, "NOMAD_ALLOC_PORT_dummy", "namespace port env var not found")
	f.Contains(taskEnv, "NOMAD_ALLOC_ADDR_dummy", "namespace addr env var not found")
}

View File

@@ -31,10 +31,13 @@
### Network-related Variables
| Variable | Description |
| ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `NOMAD_IP_<label>` | Host IP for the given port `label`. See the [`network` block documentation][network-block] for more information. |
| `NOMAD_PORT_<label>` | Port for the given port `label`. Driver-specified port when a port map is used, otherwise the host's static or dynamic port allocation. Services should bind to this port. See the [`network` block documentation][network-block] for more information. |
| `NOMAD_ADDR_<label>` | Host `IP:Port` pair for the given port `label`. |
| `NOMAD_ALLOC_INTERFACE_<label>` | The configured network namespace interface for the given port `label` when using bridged or CNI networking. |
| `NOMAD_ALLOC_IP_<label>` | The configured network namespace IP for the given port `label` when using bridged or CNI networking. |
| `NOMAD_ALLOC_ADDR_<label>` | The configured network namespace `IP:Port` pair for the given port `label` when using bridged or CNI networking. |
| `NOMAD_HOST_PORT_<label>` | Port on the host for the port `label`. See the [**Mapped Ports**](/nomad/docs/job-specification/network#mapped-ports) section of the `network` block documentation for more information. |
| `NOMAD_UPSTREAM_IP_<service>` | IP for the given `service` when defined as a Consul service mesh [upstream][]. |
| `NOMAD_UPSTREAM_PORT_<service>` | Port for the given `service` when defined as a Consul service mesh [upstream][]. |