docker: validate that containers do not run as ContainerAdmin on Windows (#23443)
This enables checks for the ContainerAdmin user in Docker images on Windows. The check only runs when Docker uses process isolation rather than Hyper-V, because Hyper-V provides its own proper sandboxing.

Co-authored-by: Tim Gross <tgross@hashicorp.com>
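
For context, the escape hatch this change adds is a Docker plugin option in the Nomad client configuration. A minimal sketch, assuming a standard client configuration file (not part of this commit); leaving the option at its default of `false` keeps the new validation active:

```hcl
# Illustrative Nomad client configuration sketch.
plugin "docker" {
  config {
    # Opt out of the ContainerAdmin validation introduced by this change.
    windows_allow_insecure_container_admin = true
  }
}
```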
commit 0ece7b5c16 (parent df67e74615), committed by GitHub
.changelog/23443.txt (new file, +3)
@@ -0,0 +1,3 @@
```release-note:improvement
docker: Validate that unprivileged containers aren't running as ContainerAdmin on Windows
```
@@ -307,6 +307,13 @@ var (
// disable_log_collection indicates whether docker driver should collect logs of docker
// task containers. If true, nomad doesn't start docker_logger/logmon processes
"disable_log_collection": hclspec.NewAttr("disable_log_collection", "bool", false),

// windows_allow_insecure_container_admin indicates that on windows,
// docker checks the task.user field or, if unset, the container image
// manifest after pulling the container, to see if it's running as
// ContainerAdmin. If so, exits with an error unless the task config has
// privileged=true.
"windows_allow_insecure_container_admin": hclspec.NewAttr("windows_allow_insecure_container_admin", "bool", false),
})

// mountBodySpec is the hcl specification for the `mount` block
@@ -653,25 +660,26 @@ type ContainerGCConfig struct {
}

type DriverConfig struct {
Endpoint string `codec:"endpoint"`
Auth AuthConfig `codec:"auth"`
TLS TLSConfig `codec:"tls"`
GC GCConfig `codec:"gc"`
Volumes VolumeConfig `codec:"volumes"`
AllowPrivileged bool `codec:"allow_privileged"`
AllowCaps []string `codec:"allow_caps"`
GPURuntimeName string `codec:"nvidia_runtime"`
InfraImage string `codec:"infra_image"`
InfraImagePullTimeout string `codec:"infra_image_pull_timeout"`
infraImagePullTimeoutDuration time.Duration `codec:"-"`
ContainerExistsAttempts uint64 `codec:"container_exists_attempts"`
DisableLogCollection bool `codec:"disable_log_collection"`
PullActivityTimeout string `codec:"pull_activity_timeout"`
PidsLimit int64 `codec:"pids_limit"`
pullActivityTimeoutDuration time.Duration `codec:"-"`
OOMScoreAdj int `codec:"oom_score_adj"`
ExtraLabels []string `codec:"extra_labels"`
Logging LoggingConfig `codec:"logging"`
Endpoint string `codec:"endpoint"`
Auth AuthConfig `codec:"auth"`
TLS TLSConfig `codec:"tls"`
GC GCConfig `codec:"gc"`
Volumes VolumeConfig `codec:"volumes"`
AllowPrivileged bool `codec:"allow_privileged"`
AllowCaps []string `codec:"allow_caps"`
GPURuntimeName string `codec:"nvidia_runtime"`
InfraImage string `codec:"infra_image"`
InfraImagePullTimeout string `codec:"infra_image_pull_timeout"`
infraImagePullTimeoutDuration time.Duration `codec:"-"`
ContainerExistsAttempts uint64 `codec:"container_exists_attempts"`
DisableLogCollection bool `codec:"disable_log_collection"`
PullActivityTimeout string `codec:"pull_activity_timeout"`
PidsLimit int64 `codec:"pids_limit"`
pullActivityTimeoutDuration time.Duration `codec:"-"`
OOMScoreAdj int `codec:"oom_score_adj"`
WindowsAllowInsecureContainerAdmin bool `codec:"windows_allow_insecure_container_admin"`
ExtraLabels []string `codec:"extra_labels"`
Logging LoggingConfig `codec:"logging"`

AllowRuntimesList []string `codec:"allow_runtimes"`
allowRuntimes map[string]struct{} `codec:"-"`
@@ -751,6 +751,35 @@ func TestConfig_DriverConfig_OOMScoreAdj(t *testing.T) {
}
}

func TestConfig_DriverConfig_WindowsAllowInsecureContainerAdmin(t *testing.T) {
ci.Parallel(t)

cases := []struct {
name string
config string
expected bool
}{
{
name: "default",
config: `{}`,
expected: false,
},
{
name: "set explicitly",
config: `{ windows_allow_insecure_container_admin = true }`,
expected: true,
},
}

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
var tc DriverConfig
hclutils.NewConfigParser(configSpec).ParseHCL(t, "config "+c.config, &tc)
must.Eq(t, c.expected, tc.WindowsAllowInsecureContainerAdmin)
})
}
}

func TestConfig_DriverConfig_InfraImagePullTimeout(t *testing.T) {
ci.Parallel(t)
@@ -21,13 +21,14 @@ var (
imageNotFoundMatcher = regexp.MustCompile(`Error: image .+ not found`)
)

// pullFuture is a sharable future for retrieving a pulled images ID and any
// error that may have occurred during the pull.
// pullFuture is a sharable future for retrieving a pulled images ID and user,
// and any error that may have occurred during the pull.
type pullFuture struct {
waitCh chan struct{}

err error
imageID string
err error
imageID string
imageUser string
}

// newPullFuture returns a new pull future
@@ -45,14 +46,15 @@ func (p *pullFuture) wait() *pullFuture {

// result returns the results of the future and should only ever be called after
// wait returns.
func (p *pullFuture) result() (imageID string, err error) {
return p.imageID, p.err
func (p *pullFuture) result() (imageID, imageUser string, err error) {
return p.imageID, p.imageUser, p.err
}

// set is used to set the results and unblock any waiter. This may only be
// called once.
func (p *pullFuture) set(imageID string, err error) {
func (p *pullFuture) set(imageID, imageUser string, err error) {
p.imageID = imageID
p.imageUser = imageUser
p.err = err
close(p.waitCh)
}
@@ -135,7 +137,7 @@ func newDockerCoordinator(config *dockerCoordinatorConfig) *dockerCoordinator {
// PullImage is used to pull an image. It returns the pulled imaged ID or an
// error that occurred during the pull
func (d *dockerCoordinator) PullImage(image string, authOptions *docker.AuthConfiguration, callerID string,
emitFn LogEventFn, pullTimeout, pullActivityTimeout time.Duration) (imageID string, err error) {
emitFn LogEventFn, pullTimeout, pullActivityTimeout time.Duration) (imageID, imageUser string, err error) {
// Get the future
d.imageLock.Lock()
future, ok := d.pullFutures[image]
@@ -149,7 +151,7 @@ func (d *dockerCoordinator) PullImage(image string, authOptions *docker.AuthConf
d.imageLock.Unlock()

// We unlock while we wait since this can take a while
id, err := future.wait().result()
id, user, err := future.wait().result()

d.imageLock.Lock()
defer d.imageLock.Unlock()
@@ -164,7 +166,7 @@ func (d *dockerCoordinator) PullImage(image string, authOptions *docker.AuthConf
d.incrementImageReferenceImpl(id, image, callerID)
}

return id, err
return id, user, err
}

// pullImageImpl is the implementation of pulling an image. The results are
@@ -200,14 +202,14 @@ func (d *dockerCoordinator) pullImageImpl(image string, authOptions *docker.Auth

if ctxErr := ctx.Err(); ctxErr == context.DeadlineExceeded {
d.logger.Error("timeout pulling container", "image_ref", dockerImageRef(repo, tag))
future.set("", recoverablePullError(ctxErr, image))
future.set("", "", recoverablePullError(ctxErr, image))
return
}

if err != nil {
d.logger.Error("failed pulling container", "image_ref", dockerImageRef(repo, tag),
"error", err)
future.set("", recoverablePullError(err, image))
future.set("", "", recoverablePullError(err, image))
return
}

@@ -216,11 +218,16 @@ func (d *dockerCoordinator) pullImageImpl(image string, authOptions *docker.Auth
dockerImage, err := d.client.InspectImage(image)
if err != nil {
d.logger.Error("failed getting image id", "image_name", image, "error", err)
future.set("", recoverableErrTimeouts(err))
future.set("", "", recoverableErrTimeouts(err))
return
}

future.set(dockerImage.ID, nil)
var imageUser string
if dockerImage.Config != nil {
imageUser = dockerImage.Config.User
}

future.set(dockerImage.ID, imageUser, err)
}

// IncrementImageReference is used to increment an image reference count
@@ -77,7 +77,7 @@ func TestDockerCoordinator_ConcurrentPulls(t *testing.T) {
// Create a coordinator
coordinator := newDockerCoordinator(config)

id, _ := coordinator.PullImage(image, nil, uuid.Generate(), nil, 5*time.Minute, 2*time.Minute)
id, _, _ := coordinator.PullImage(image, nil, uuid.Generate(), nil, 5*time.Minute, 2*time.Minute)
for i := 0; i < 9; i++ {
go func() {
coordinator.PullImage(image, nil, uuid.Generate(), nil, 5*time.Minute, 2*time.Minute)
@@ -133,7 +133,7 @@ func TestDockerCoordinator_Pull_Remove(t *testing.T) {
callerIDs := make([]string, 10, 10)
for i := 0; i < 10; i++ {
callerIDs[i] = uuid.Generate()
id, _ = coordinator.PullImage(image, nil, callerIDs[i], nil, 5*time.Minute, 2*time.Minute)
id, _, _ = coordinator.PullImage(image, nil, callerIDs[i], nil, 5*time.Minute, 2*time.Minute)
}

// Check the reference count
@@ -203,7 +203,7 @@ func TestDockerCoordinator_Remove_Cancel(t *testing.T) {
callerID := uuid.Generate()

// Pull image
id, _ := coordinator.PullImage(image, nil, callerID, nil, 5*time.Minute, 2*time.Minute)
id, _, _ := coordinator.PullImage(image, nil, callerID, nil, 5*time.Minute, 2*time.Minute)

// Check the reference count
if references := coordinator.imageRefCount[id]; len(references) != 1 {
@@ -219,7 +219,7 @@ func TestDockerCoordinator_Remove_Cancel(t *testing.T) {
}

// Pull image again within delay
id, _ = coordinator.PullImage(image, nil, callerID, nil, 5*time.Minute, 2*time.Minute)
id, _, _ = coordinator.PullImage(image, nil, callerID, nil, 5*time.Minute, 2*time.Minute)

// Check the reference count
if references := coordinator.imageRefCount[id]; len(references) != 1 {
@@ -252,7 +252,7 @@ func TestDockerCoordinator_No_Cleanup(t *testing.T) {
callerID := uuid.Generate()

// Pull image
id, _ := coordinator.PullImage(image, nil, callerID, nil, 5*time.Minute, 2*time.Minute)
id, _, _ := coordinator.PullImage(image, nil, callerID, nil, 5*time.Minute, 2*time.Minute)

// Check the reference count
if references := coordinator.imageRefCount[id]; len(references) != 0 {
@@ -292,10 +292,10 @@ func TestDockerCoordinator_Cleanup_HonorsCtx(t *testing.T) {
callerID := uuid.Generate()

// Pull image
id1, _ := coordinator.PullImage(image1ID, nil, callerID, nil, 5*time.Minute, 2*time.Minute)
id1, _, _ := coordinator.PullImage(image1ID, nil, callerID, nil, 5*time.Minute, 2*time.Minute)
require.Len(t, coordinator.imageRefCount[id1], 1, "image reference count")

id2, _ := coordinator.PullImage(image2ID, nil, callerID, nil, 5*time.Minute, 2*time.Minute)
id2, _, _ := coordinator.PullImage(image2ID, nil, callerID, nil, 5*time.Minute, 2*time.Minute)
require.Len(t, coordinator.imageRefCount[id2], 1, "image reference count")

// remove one image, cancel ctx, remove second, and assert only first image is cleanedup
@@ -340,11 +340,16 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
return nil, nil, fmt.Errorf("Failed to create long operations docker client: %v", err)
}

id, err := d.createImage(cfg, &driverConfig, dockerClient)
id, user, err := d.createImage(cfg, &driverConfig, dockerClient)
if err != nil {
return nil, nil, err
}

// validate the image user (windows only)
if err := validateImageUser(user, cfg.User, &driverConfig, d.config); err != nil {
return nil, nil, err
}

if runtime.GOOS == "windows" {
err = d.convertAllocPathsForWindowsLCOW(cfg, driverConfig.Image)
if err != nil {
@@ -593,7 +598,7 @@ START:

// createImage creates a docker image either by pulling it from a registry or by
// loading it from the file system
func (d *Driver) createImage(task *drivers.TaskConfig, driverConfig *TaskConfig, client *docker.Client) (string, error) {
func (d *Driver) createImage(task *drivers.TaskConfig, driverConfig *TaskConfig, client *docker.Client) (string, string, error) {
image := driverConfig.Image
repo, tag := parseDockerImage(image)

@@ -606,7 +611,11 @@ func (d *Driver) createImage(task *drivers.TaskConfig, driverConfig *TaskConfig,
if dockerImage, _ := client.InspectImage(image); dockerImage != nil {
// Image exists so just increment its reference count
d.coordinator.IncrementImageReference(dockerImage.ID, image, task.ID)
return dockerImage.ID, nil
var user string
if dockerImage.Config != nil {
user = dockerImage.Config.User
}
return dockerImage.ID, user, nil
}
}

@@ -616,17 +625,17 @@ func (d *Driver) createImage(task *drivers.TaskConfig, driverConfig *TaskConfig,
}

// Download the image
return d.pullImage(task, driverConfig, client, repo, tag)
return d.pullImage(task, driverConfig, repo, tag)
}

// pullImage creates an image by pulling it from a docker registry
func (d *Driver) pullImage(task *drivers.TaskConfig, driverConfig *TaskConfig, client *docker.Client, repo, tag string) (id string, err error) {
func (d *Driver) pullImage(task *drivers.TaskConfig, driverConfig *TaskConfig, repo, tag string) (id, user string, err error) {
authOptions, err := d.resolveRegistryAuthentication(driverConfig, repo)
if err != nil {
if driverConfig.AuthSoftFail {
d.logger.Warn("Failed to find docker repo auth", "repo", repo, "error", err)
} else {
return "", fmt.Errorf("Failed to find docker auth for repo %q: %v", repo, err)
return "", "", fmt.Errorf("Failed to find docker auth for repo %q: %v", repo, err)
}
}

@@ -647,7 +656,7 @@ func (d *Driver) pullImage(task *drivers.TaskConfig, driverConfig *TaskConfig, c

pullDur, err := time.ParseDuration(driverConfig.ImagePullTimeout)
if err != nil {
return "", fmt.Errorf("Failed to parse image_pull_timeout: %v", err)
return "", "", fmt.Errorf("Failed to parse image_pull_timeout: %v", err)
}

return d.coordinator.PullImage(driverConfig.Image, authOptions, task.ID, d.emitEventFunc(task), pullDur, d.config.pullActivityTimeoutDuration)
@@ -680,28 +689,32 @@ func (d *Driver) resolveRegistryAuthentication(driverConfig *TaskConfig, repo st
}

// loadImage creates an image by loading it from the file system
func (d *Driver) loadImage(task *drivers.TaskConfig, driverConfig *TaskConfig, client *docker.Client) (id string, err error) {
func (d *Driver) loadImage(task *drivers.TaskConfig, driverConfig *TaskConfig, client *docker.Client) (id string, user string, err error) {

archive := filepath.Join(task.TaskDir().LocalDir, driverConfig.LoadImage)
d.logger.Debug("loading image from disk", "archive", archive)

f, err := os.Open(archive)
if err != nil {
return "", fmt.Errorf("unable to open image archive: %v", err)
return "", "", fmt.Errorf("unable to open image archive: %v", err)
}

if err := client.LoadImage(docker.LoadImageOptions{InputStream: f}); err != nil {
return "", err
return "", "", err
}
f.Close()

dockerImage, err := client.InspectImage(driverConfig.Image)
if err != nil {
return "", recoverableErrTimeouts(err)
return "", "", recoverableErrTimeouts(err)
}

d.coordinator.IncrementImageReference(dockerImage.ID, driverConfig.Image, task.ID)
return dockerImage.ID, nil
var imageUser string
if dockerImage.Config != nil {
imageUser = dockerImage.Config.User
}
return dockerImage.ID, imageUser, nil
}

func (d *Driver) convertAllocPathsForWindowsLCOW(task *drivers.TaskConfig, image string) error {
@@ -12,3 +12,7 @@ import (
func getPortBinding(ip string, port string) docker.PortBinding {
return docker.PortBinding{HostIP: ip, HostPort: port}
}

func validateImageUser(imageUser, taskUser string, taskDriverConfig *TaskConfig, driverConfig *DriverConfig) error {
return nil
}
@@ -343,7 +343,7 @@ func TestDockerDriver_Start_StoppedContainer(t *testing.T) {
var err error

if runtime.GOOS != "windows" {
imageID, err = d.Impl().(*Driver).loadImage(task, &taskCfg, client)
imageID, _, err = d.Impl().(*Driver).loadImage(task, &taskCfg, client)
} else {
image, lErr := client.InspectImage(taskCfg.Image)
err = lErr
@@ -398,7 +398,7 @@ func TestDockerDriver_ContainerAlreadyExists(t *testing.T) {
d, ok := driver.Impl().(*Driver)
must.True(t, ok)

_, err := d.createImage(task, cfg, client)
_, _, err := d.createImage(task, cfg, client)
must.NoError(t, err)

containerCfg, err := d.createContainerConfig(task, cfg, cfg.Image)
@@ -2834,7 +2834,7 @@ func TestDockerDriver_CreationIdempotent(t *testing.T) {
d, ok := driver.Impl().(*Driver)
require.True(t, ok)

_, err := d.createImage(task, cfg, client)
_, _, err := d.createImage(task, cfg, client)
require.NoError(t, err)

containerCfg, err := d.createContainerConfig(task, cfg, cfg.Image)
@@ -5,7 +5,11 @@

package docker

import docker "github.com/fsouza/go-dockerclient"
import (
"errors"

docker "github.com/fsouza/go-dockerclient"
)

// Currently Windows containers don't support host ip in port binding.
func getPortBinding(ip string, port string) docker.PortBinding {
@@ -15,3 +19,19 @@ func getPortBinding(ip string, port string) docker.PortBinding {
func tweakCapabilities(basics, adds, drops []string) ([]string, error) {
return nil, nil
}

var containerAdminErrMsg = "running container as ContainerAdmin is unsafe; change the container user, set task configuration to privileged or enable windows_allow_insecure_container_admin to disable this check"

func validateImageUser(user, taskUser string, taskDriverConfig *TaskConfig, driverConfig *DriverConfig) error {
// we're only interested in the case where isolation is set to "process"
// (it's also the default) and when windows_allow_insecure_container_admin
// is explicitly set to true in the config
if driverConfig.WindowsAllowInsecureContainerAdmin || taskDriverConfig.Isolation == "hyper-v" {
return nil
}

if user == "ContainerAdmin" && (taskUser == "ContainerAdmin" || taskUser == "") && !taskDriverConfig.Privileged {
return errors.New(containerAdminErrMsg)
}
return nil
}
@@ -8,8 +8,12 @@ package docker
import (
"testing"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/plugins/drivers"
"github.com/hashicorp/nomad/testutil"
"github.com/shoenig/test/must"
)

func newTaskConfig(variant string, command []string) TaskConfig {
@@ -28,3 +32,101 @@ func newTaskConfig(variant string, command []string) TaskConfig {
// No-op on windows because we don't load images.
func copyImage(t *testing.T, taskDir *allocdir.TaskDir, image string) {
}

func Test_validateImageUser(t *testing.T) {
ci.Parallel(t)

taskCfg := &drivers.TaskConfig{
ID: uuid.Generate(),
Name: "busybox-demo",
User: "nomadUser",
}
taskDriverCfg := newTaskConfig("", []string{"sh", "-c", "sleep 1"})

tests := []struct {
name string
taskUser string
containerUser string
privileged bool
isolation string
driverConfig *DriverConfig
wantErr bool
want string
}{
{
"normal user",
"nomadUser",
"nomadUser",
false,
"process",
&DriverConfig{},
false,
"",
},
{
"ContainerAdmin image user, non-priviliged",
"",
"ContainerAdmin",
false,
"process",
&DriverConfig{},
true,
containerAdminErrMsg,
},
{
"ContainerAdmin image user, non-priviliged, but hyper-v",
"",
"ContainerAdmin",
false,
"hyper-v",
&DriverConfig{},
false,
"",
},
{
"ContainerAdmin task user, non-priviliged",
"",
"ContainerAdmin",
false,
"process",
&DriverConfig{},
true,
containerAdminErrMsg,
},
{
"ContainerAdmin image user, non-priviliged, but overriden by task user",
"ContainerUser",
"ContainerAdmin",
false,
"process",
&DriverConfig{},
false,
"",
},
{
"ContainerAdmin image user, non-priviliged, but overriden by windows_allow_insecure_container_admin",
"ContainerAdmin",
"ContainerAdmin",
false,
"process",
&DriverConfig{WindowsAllowInsecureContainerAdmin: true},
false,
"",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
taskCfg.User = tt.taskUser
taskDriverCfg.Privileged = tt.privileged
taskDriverCfg.Isolation = tt.isolation

err := validateImageUser(tt.containerUser, tt.taskUser, &taskDriverCfg, tt.driverConfig)
if tt.wantErr {
must.Error(t, err)
must.Eq(t, tt.want, containerAdminErrMsg)
} else {
must.NoError(t, err)
}
})
}
}
@@ -226,6 +226,6 @@ func (d *Driver) pullInfraImage(allocID string) error {
d.logger.Debug("auth failed for infra_image container pull", "image", d.config.InfraImage, "error", err)
}

_, err = d.coordinator.PullImage(d.config.InfraImage, authOptions, allocID, noopLogEventFn, d.config.infraImagePullTimeoutDuration, d.config.pullActivityTimeoutDuration)
_, _, err = d.coordinator.PullImage(d.config.InfraImage, authOptions, allocID, noopLogEventFn, d.config.infraImagePullTimeoutDuration, d.config.pullActivityTimeoutDuration)
return err
}
@@ -1003,6 +1003,11 @@ host system.
wait before cancelling an in-progress pull of the Docker image as specified in
`infra_image`. Defaults to `"5m"`.

- `windows_allow_insecure_container_admin` - Indicates that on windows, docker
checks the `task.user` field or, if unset, the container image manifest after
pulling the container, to see if it's running as `ContainerAdmin`. If so, exits
with an error unless the task config has `privileged=true`. Defaults to `false`.
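
As an illustrative sketch (not part of the committed docs), a task can also satisfy the check without touching the plugin configuration by overriding the image's user; the image name below is a hypothetical placeholder:

```hcl
task "web" {
  driver = "docker"

  # Overrides the image manifest's ContainerAdmin user, so the check passes.
  user = "ContainerUser"

  config {
    # Hypothetical image whose manifest user is ContainerAdmin.
    image = "example.internal/my-windows-app:1.0"
  }
}
```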

## Client Configuration

~> Note: client configuration options will soon be deprecated. Please use
@@ -13,7 +13,17 @@ upgrade. However, specific versions of Nomad may have more details provided for
their upgrades as a result of new features or changed behavior. This page is
used to document those details separately from the standard upgrade flow.

## Nomad 1.8.1 (UNRELEASED)
## Nomad 1.8.2 (UNRELEASED)

#### New `windows_allow_insecure_container_admin` configuration option for Docker driver

In 1.8.2, Nomad will refuse to run jobs that use the Docker driver on Windows
with [Process Isolation][] that run as `ContainerAdmin`. This is in order to
provide a more secure environment for these jobs, and this behavior can be
overridden by setting the new `windows_allow_insecure_container_admin` Docker
plugin configuration option to `true` or by setting `privileged=true`.
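
As a hedged sketch of the `privileged` override mentioned above (not part of the committed docs): a task can set `privileged = true`, which in turn requires the operator to allow privileged containers via `allow_privileged = true` in the client's Docker plugin configuration. The image name is a hypothetical placeholder:

```hcl
task "admin-tool" {
  driver = "docker"

  config {
    # Hypothetical image whose manifest user is ContainerAdmin.
    image = "example.internal/admin-tool:1.0"

    # Opts this task out of the ContainerAdmin check; also requires
    # allow_privileged = true in the client's Docker plugin configuration.
    privileged = true
  }
}
```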

## Nomad 1.8.1

<EnterpriseAlert inline />

@@ -2067,3 +2077,4 @@ deleted and then Nomad 0.3.0 can be launched.

[validate]: /nomad/docs/commands/job/validate
[vault_grace]: /nomad/docs/job-specification/template
[Workload Identity]: /nomad/docs/concepts/workload-identity
[Process Isolation]: https://learn.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container#process-isolation