cni: fix regression in falling back to DNS owned by dockerd (#20189)
In #20007 we fixed a bug where the DNS configuration set by CNI plugins was not threaded through to the task configuration. This resulted in a regression where a DNS override set by `dockerd` was not respected for `bridge` mode networking.

Our existing handling of CNI DNS incorrectly assumed that the DNS field would be empty, when in fact it contains a single empty DNS struct. Handle this case correctly by checking whether the DNS struct we get back from CNI has any nameservers, and ignore it if it doesn't. Expand test coverage of this case.

Fixes: https://github.com/hashicorp/nomad/issues/20174
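The shape of the go-cni result is the crux here: `cni.Result.DNS` is a `[]types.DNS`, and a plugin that sets no DNS still yields a slice holding a single zero-value struct, so `len(res.DNS) > 0` alone is not a valid emptiness test. Below is a minimal, runnable sketch of that guard using only the upstream `github.com/containernetworking/cni/pkg/types` package; `dnsFromCNI` is an illustrative helper, not the Nomad function itself:

    package main

    import (
    	"fmt"

    	"github.com/containernetworking/cni/pkg/types"
    )

    // dnsFromCNI returns nil when the CNI result carries no usable DNS
    // configuration, so the caller can fall back to the task driver's own
    // default (e.g. DNS owned by dockerd).
    func dnsFromCNI(dns []types.DNS) *types.DNS {
    	if len(dns) == 0 {
    		return nil // no DNS block at all
    	}
    	first := dns[0]
    	// A plugin that sets no DNS still returns one zero-value struct,
    	// so check for nameservers rather than slice length.
    	if len(first.Nameservers) == 0 {
    		return nil
    	}
    	return &first
    }

    func main() {
    	// The regression case: a single empty struct, not an empty slice.
    	fmt.Println(dnsFromCNI([]types.DNS{{}})) // <nil>: fall back to driver DNS
    	// A plugin that actually sets nameservers wins.
    	fmt.Println(dnsFromCNI([]types.DNS{{Nameservers: []string{"10.0.0.1"}}}))
    }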
.changelog/20189.txt (new file, +3):

@@ -0,0 +1,3 @@
+```release-note:bug
+cni: Fixed a regression where default DNS set by `dockerd` or other task drivers was not respected
+```
client/allocrunner/networking_cni.go:

@@ -184,12 +184,15 @@ func (c *cniNetworkConfigurator) cniToAllocNet(res *cni.Result) (*structs.AllocNetworkStatus, error) {
     }

-    // Use the first DNS results.
+    // Use the first DNS results, if non-empty
     if len(res.DNS) > 0 {
-        netStatus.DNS = &structs.DNSConfig{
-            Servers:  res.DNS[0].Nameservers,
-            Searches: res.DNS[0].Search,
-            Options:  res.DNS[0].Options,
+        cniDNS := res.DNS[0]
+        if len(cniDNS.Nameservers) > 0 {
+            netStatus.DNS = &structs.DNSConfig{
+                Servers:  cniDNS.Nameservers,
+                Searches: cniDNS.Search,
+                Options:  cniDNS.Options,
+            }
         }
     }
client/allocrunner/networking_cni_test.go:

@@ -11,10 +11,11 @@ import (
     "testing"

     "github.com/containerd/go-cni"
+    "github.com/containernetworking/cni/pkg/types"
     "github.com/hashicorp/nomad/ci"
     "github.com/hashicorp/nomad/helper/testlog"
+    "github.com/shoenig/test"
     "github.com/shoenig/test/must"
-    "github.com/stretchr/testify/assert"
-    "github.com/stretchr/testify/require"
 )
@@ -148,9 +149,8 @@ func TestCNI_cniToAllocNet_NoInterfaces(t *testing.T) {
 func TestCNI_cniToAllocNet_Fallback(t *testing.T) {
     ci.Parallel(t)

-    // Calico's CNI plugin v3.12.3 has been observed to return the
-    // following:
     cniResult := &cni.Result{
+        // Calico's CNI plugin v3.12.3 has been observed to return the following:
         Interfaces: map[string]*cni.Config{
             "cali39179aa3-74": {},
             "eth0": {
@@ -161,6 +161,8 @@ func TestCNI_cniToAllocNet_Fallback(t *testing.T) {
                 },
             },
         },
+        // cni.Result will return a single empty struct, not an empty slice
+        DNS: []types.DNS{{}},
     }

     // Only need a logger
@@ -168,11 +170,11 @@ func TestCNI_cniToAllocNet_Fallback(t *testing.T) {
         logger: testlog.HCLogger(t),
     }
     allocNet, err := c.cniToAllocNet(cniResult)
-    require.NoError(t, err)
-    require.NotNil(t, allocNet)
-    assert.Equal(t, "192.168.135.232", allocNet.Address)
-    assert.Equal(t, "eth0", allocNet.InterfaceName)
-    assert.Nil(t, allocNet.DNS)
+    must.NoError(t, err)
+    must.NotNil(t, allocNet)
+    test.Eq(t, "192.168.135.232", allocNet.Address)
+    test.Eq(t, "eth0", allocNet.InterfaceName)
+    test.Nil(t, allocNet.DNS)
 }

 // TestCNI_cniToAllocNet_Invalid asserts an error is returned if a CNI plugin
client/allocrunner/taskrunner/task_runner_test.go:

@@ -2935,41 +2935,39 @@ func TestTaskRunner_IdentityHook_Disabled(t *testing.T) {
 func TestTaskRunner_AllocNetworkStatus(t *testing.T) {
     ci.Parallel(t)

-    // Mock task with group network
-    alloc1 := mock.Alloc()
-    task1 := alloc1.Job.TaskGroups[0].Tasks[0]
-    alloc1.AllocatedResources.Shared.Networks = []*structs.NetworkResource{
-        {
-            Device: "eth0",
-            IP:     "192.168.0.100",
-            DNS: &structs.DNSConfig{
-                Servers:  []string{"1.1.1.1", "8.8.8.8"},
-                Searches: []string{"test.local"},
-                Options:  []string{"ndots:1"},
-            },
-            ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
-            DynamicPorts:  []structs.Port{{Label: "http", Value: 9876}},
-        }}
-    task1.Driver = "mock_driver"
-    task1.Config = map[string]interface{}{"run_for": "2s"}
+    alloc := mock.Alloc()
+    task := alloc.Job.TaskGroups[0].Tasks[0]
+    task.Driver = "mock_driver"
+    task.Config = map[string]interface{}{"run_for": "2s"}

-    // Mock task with task networking only
-    alloc2 := mock.Alloc()
-    task2 := alloc2.Job.TaskGroups[0].Tasks[0]
-    task2.Driver = "mock_driver"
-    task2.Config = map[string]interface{}{"run_for": "2s"}
+    groupNetworks := []*structs.NetworkResource{{
+        Device: "eth0",
+        IP:     "192.168.0.100",
+        DNS: &structs.DNSConfig{
+            Servers:  []string{"1.1.1.1", "8.8.8.8"},
+            Searches: []string{"test.local"},
+            Options:  []string{"ndots:1"},
+        },
+        ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
+        DynamicPorts:  []structs.Port{{Label: "http", Value: 9876}},
+    }}
+
+    groupNetworksWithoutDNS := []*structs.NetworkResource{{
+        Device:        "eth0",
+        IP:            "192.168.0.100",
+        ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
+        DynamicPorts:  []structs.Port{{Label: "http", Value: 9876}},
+    }}

     testCases := []struct {
-        name    string
-        alloc   *structs.Allocation
-        task    *structs.Task
-        fromCNI *structs.DNSConfig
-        expect  *drivers.DNSConfig
+        name     string
+        networks []*structs.NetworkResource
+        fromCNI  *structs.DNSConfig
+        expect   *drivers.DNSConfig
     }{
         {
-            name:  "task with group networking overrides CNI",
-            alloc: alloc1,
-            task:  task1,
+            name:     "task with group networking overrides CNI",
+            networks: groupNetworks,
             fromCNI: &structs.DNSConfig{
                 Servers:  []string{"10.37.105.17"},
                 Searches: []string{"node.consul"},
@@ -2982,9 +2980,7 @@ func TestTaskRunner_AllocNetworkStatus(t *testing.T) {
             },
         },
         {
-            name:  "task with CNI alone",
-            alloc: alloc2,
-            task:  task1,
+            name: "task with CNI alone",
             fromCNI: &structs.DNSConfig{
                 Servers:  []string{"10.37.105.17"},
                 Searches: []string{"node.consul"},
@@ -2997,10 +2993,9 @@ func TestTaskRunner_AllocNetworkStatus(t *testing.T) {
             },
         },
         {
-            name:    "task with group networking alone",
-            alloc:   alloc1,
-            task:    task1,
-            fromCNI: nil,
+            name:     "task with group networking alone with DNS",
+            networks: groupNetworks,
+            fromCNI:  nil,
             expect: &drivers.DNSConfig{
                 Servers:  []string{"1.1.1.1", "8.8.8.8"},
                 Searches: []string{"test.local"},
@@ -3008,9 +3003,13 @@ func TestTaskRunner_AllocNetworkStatus(t *testing.T) {
             },
         },
         {
-            name:  "task without group networking",
-            alloc: alloc2,
-            task:  task2,
+            name:     "task with group networking and no CNI dns",
+            networks: groupNetworksWithoutDNS,
+            fromCNI:  &structs.DNSConfig{},
+            expect:   &drivers.DNSConfig{},
+        },
+        {
+            name:    "task without group networking or CNI",
             fromCNI: nil,
             expect:  nil,
         },
@@ -3019,7 +3018,10 @@ func TestTaskRunner_AllocNetworkStatus(t *testing.T) {
     for _, tc := range testCases {
         t.Run(tc.name, func(t *testing.T) {

-            conf, cleanup := testTaskRunnerConfig(t, tc.alloc, tc.task.Name, nil)
+            testAlloc := alloc.Copy()
+            testAlloc.AllocatedResources.Shared.Networks = tc.networks
+
+            conf, cleanup := testTaskRunnerConfig(t, testAlloc, task.Name, nil)
             t.Cleanup(cleanup)

             // note this will never actually be set if we don't have group/CNI