docker: stop network pause container of lost alloc after node restart (#17455)

This PR fixes a bug where the docker network pause container would not be
stopped and removed in the case where a node is restarted, the alloc is
moved to another node, and then the node comes back up. See the issue below
for the full repro conditions.

Basically, in the DestroyNetwork PostRun hook we depended on the
NetworkIsolationSpec field not being nil, which is only the case
if the Client stays alive all the way from network creation to network
teardown. If the node is rebooted we lose that state, and previously we
could not find the pause container to remove. Now we find the pause
container manually, by scanning the running containers and looking for
the one labeled with the associated allocID.

Fixes #17299
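As context for the change below, here is a minimal standalone sketch of the label-based scan the new findPauseContainer helper performs, using the fsouza/go-dockerclient API the driver already depends on. The findPauseContainerID name, the allocIDLabel constant value, and the placeholder alloc ID are illustrative assumptions; the running-only listing, the label filter, and the "/nomad_init_" name prefix come from the diff below.

package main

import (
	"fmt"
	"strings"

	docker "github.com/fsouza/go-dockerclient"
)

// allocIDLabel is an assumed stand-in for the driver's dockerLabelAllocID constant.
const allocIDLabel = "com.hashicorp.nomad.alloc_id"

// findPauseContainerID lists running containers carrying the alloc-ID label and
// returns the ID of the pause (infra) container belonging to allocID, or "" if
// no such container is running.
func findPauseContainerID(client *docker.Client, allocID string) (string, error) {
	containers, err := client.ListContainers(docker.ListContainersOptions{
		All:     false, // running containers only
		Filters: map[string][]string{"label": {allocIDLabel}},
	})
	if err != nil {
		return "", fmt.Errorf("failed to list containers: %w", err)
	}
	for _, c := range containers {
		// pause containers are named "/nomad_init_<alloc-id>"
		pause := false
		for _, name := range c.Names {
			if strings.HasPrefix(name, "/nomad_init_") {
				pause = true
				break
			}
		}
		if pause && c.Labels[allocIDLabel] == allocID {
			return c.ID, nil
		}
	}
	return "", nil
}

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		panic(err)
	}
	// placeholder alloc ID for illustration only
	id, err := findPauseContainerID(client, "00000000-0000-0000-0000-000000000000")
	fmt.Println(id, err)
}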
Seth Hoenig
2023-06-09 08:46:29 -05:00
committed by GitHub
parent 408ab828f7
commit 89ce092b20
7 changed files with 142 additions and 38 deletions


@@ -35,6 +35,7 @@ import (
 	"github.com/hashicorp/nomad/plugins/drivers"
 	pstructs "github.com/hashicorp/nomad/plugins/shared/structs"
 	"github.com/ryanuber/go-glob"
+	"golang.org/x/exp/slices"
 )
 var (
@@ -760,6 +761,39 @@ func (d *Driver) containerBinds(task *drivers.TaskConfig, driverConfig *TaskConf
 	return binds, nil
 }
+func (d *Driver) findPauseContainer(allocID string) (string, error) {
+	_, dockerClient, err := d.dockerClients()
+	if err != nil {
+		return "", err
+	}
+	containers, listErr := dockerClient.ListContainers(docker.ListContainersOptions{
+		Context: d.ctx,
+		All:     false, // running only
+		Filters: map[string][]string{
+			"label": {dockerLabelAllocID},
+		},
+	})
+	if listErr != nil {
+		d.logger.Error("failed to list pause containers for recovery", "error", listErr)
+		return "", listErr
+	}
+	for _, c := range containers {
+		if !slices.ContainsFunc(c.Names, func(s string) bool {
+			return strings.HasPrefix(s, "/nomad_init_")
+		}) {
+			continue
+		}
+		if c.Labels[dockerLabelAllocID] == allocID {
+			return c.ID, nil
+		}
+	}
+	return "", nil
+}
 // recoverPauseContainers gets called when we start up the plugin. On client
 // restarts we need to rebuild the set of pause containers we are
 // tracking. Basically just scan all containers and pull the ID from anything
@@ -779,7 +813,7 @@ func (d *Driver) recoverPauseContainers(ctx context.Context) {
 		},
 	})
 	if listErr != nil && listErr != ctx.Err() {
-		d.logger.Error("failed to list pause containers", "error", listErr)
+		d.logger.Error("failed to list pause containers for recovery", "error", listErr)
 		return
 	}


@@ -93,7 +93,30 @@ func (d *Driver) CreateNetwork(allocID string, createSpec *drivers.NetworkCreate
 }
 func (d *Driver) DestroyNetwork(allocID string, spec *drivers.NetworkIsolationSpec) error {
-	id := spec.Labels[dockerNetSpecLabelKey]
+	var (
+		id  string
+		err error
+	)
+	if spec != nil {
+		// if we have the spec we can just read the container id
+		id = spec.Labels[dockerNetSpecLabelKey]
+	} else {
+		// otherwise we need to scan all the containers and find the pause container
+		// associated with this allocation - this happens when the client is
+		// restarted since we do not persist the network spec
+		id, err = d.findPauseContainer(allocID)
+	}
+	if err != nil {
+		return err
+	}
+	if id == "" {
+		d.logger.Debug("failed to find pause container to cleanup", "alloc_id", allocID)
+		return nil
+	}
 	// no longer tracking this pause container; even if we fail here we should
 	// let the background reconciliation keep trying
@@ -104,11 +127,16 @@ func (d *Driver) DestroyNetwork(allocID string, spec *drivers.NetworkIsolationSp
 		return fmt.Errorf("failed to connect to docker daemon: %s", err)
 	}
+	timeout := uint(1) // this is the pause container, just kill it fast
+	if err := client.StopContainerWithContext(id, timeout, d.ctx); err != nil {
+		d.logger.Warn("failed to stop pause container", "id", id, "error", err)
+	}
 	if err := client.RemoveContainer(docker.RemoveContainerOptions{
 		Force: true,
 		ID:    id,
 	}); err != nil {
-		return err
+		return fmt.Errorf("failed to remove pause container: %w", err)
 	}
 	if d.config.GC.Image {
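
For reference, a minimal standalone sketch of the teardown sequence above, a stop with a short timeout followed by a forced removal, again assuming the fsouza/go-dockerclient API. The stopAndRemovePause name, the plain log output, and the placeholder container ID are illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

// stopAndRemovePause stops the pause container with a very short timeout and
// then force-removes it, mirroring the teardown in DestroyNetwork above.
func stopAndRemovePause(ctx context.Context, client *docker.Client, id string) error {
	timeout := uint(1) // the pause container runs no real workload, so stop it fast
	if err := client.StopContainerWithContext(id, timeout, ctx); err != nil {
		// a failed stop is non-fatal; the forced removal below still cleans up
		log.Printf("warn: failed to stop pause container %s: %v", id, err)
	}
	if err := client.RemoveContainer(docker.RemoveContainerOptions{
		Force: true,
		ID:    id,
	}); err != nil {
		return fmt.Errorf("failed to remove pause container: %w", err)
	}
	return nil
}

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		panic(err)
	}
	// placeholder container ID for illustration only
	if err := stopAndRemovePause(context.Background(), client, "pause-container-id"); err != nil {
		fmt.Println(err)
	}
}

Treating a failed stop as non-fatal matches the Warn-and-continue behavior in the diff: the forced removal is what actually guarantees cleanup.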