docker: validate that containers do not run as ContainerAdmin on Windows (#23443)

This enables a check for the ContainerAdmin user on Docker images on Windows. The
check only applies when Docker runs with process isolation rather than Hyper-V,
because Hyper-V provides its own proper sandboxing.
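To illustrate the effect on job authors, here is a minimal sketch of a Windows Docker task whose image declares `ContainerAdmin`, showing the two task-level ways to satisfy the new check. The job and image names are hypothetical; `user` and `privileged` are the fields this change inspects:

```hcl
task "windows-app" {
  driver = "docker"

  # Either override the image's ContainerAdmin user at the task level ...
  user = "ContainerUser"

  config {
    image = "example.registry.local/windows-app:latest" # hypothetical image

    # ... or explicitly opt in to privileged mode instead:
    # privileged = true
  }
}
```

Operators can alternatively disable the check cluster-wide with the new `windows_allow_insecure_container_admin` plugin option; see the docs changes at the end of this diff.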

---------

Co-authored-by: Tim Gross <tgross@hashicorp.com>
Piotr Kazmierczak
2024-06-27 14:22:24 +00:00
committed by GitHub
parent df67e74615
commit 0ece7b5c16
13 changed files with 260 additions and 58 deletions

.changelog/23443.txt Normal file

@@ -0,0 +1,3 @@
```release-note:improvement
docker: Validate that unprivileged containers aren't running as ContainerAdmin on Windows
```


@@ -307,6 +307,13 @@ var (
// disable_log_collection indicates whether docker driver should collect logs of docker
// task containers. If true, nomad doesn't start docker_logger/logmon processes
"disable_log_collection": hclspec.NewAttr("disable_log_collection", "bool", false),
// windows_allow_insecure_container_admin indicates that on windows,
// docker checks the task.user field or, if unset, the container image
// manifest after pulling the container, to see if it's running as
// ContainerAdmin. If so, exits with an error unless the task config has
// privileged=true.
"windows_allow_insecure_container_admin": hclspec.NewAttr("windows_allow_insecure_container_admin", "bool", false),
})
// mountBodySpec is the hcl specification for the `mount` block
@@ -653,25 +660,26 @@ type ContainerGCConfig struct {
}
type DriverConfig struct {
Endpoint string `codec:"endpoint"`
Auth AuthConfig `codec:"auth"`
TLS TLSConfig `codec:"tls"`
GC GCConfig `codec:"gc"`
Volumes VolumeConfig `codec:"volumes"`
AllowPrivileged bool `codec:"allow_privileged"`
AllowCaps []string `codec:"allow_caps"`
GPURuntimeName string `codec:"nvidia_runtime"`
InfraImage string `codec:"infra_image"`
InfraImagePullTimeout string `codec:"infra_image_pull_timeout"`
infraImagePullTimeoutDuration time.Duration `codec:"-"`
ContainerExistsAttempts uint64 `codec:"container_exists_attempts"`
DisableLogCollection bool `codec:"disable_log_collection"`
PullActivityTimeout string `codec:"pull_activity_timeout"`
PidsLimit int64 `codec:"pids_limit"`
pullActivityTimeoutDuration time.Duration `codec:"-"`
OOMScoreAdj int `codec:"oom_score_adj"`
WindowsAllowInsecureContainerAdmin bool `codec:"windows_allow_insecure_container_admin"`
ExtraLabels []string `codec:"extra_labels"`
Logging LoggingConfig `codec:"logging"`
AllowRuntimesList []string `codec:"allow_runtimes"`
allowRuntimes map[string]struct{} `codec:"-"`


@@ -751,6 +751,35 @@ func TestConfig_DriverConfig_OOMScoreAdj(t *testing.T) {
}
}
func TestConfig_DriverConfig_WindowsAllowInsecureContainerAdmin(t *testing.T) {
ci.Parallel(t)
cases := []struct {
name string
config string
expected bool
}{
{
name: "default",
config: `{}`,
expected: false,
},
{
name: "set explicitly",
config: `{ windows_allow_insecure_container_admin = true }`,
expected: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
var tc DriverConfig
hclutils.NewConfigParser(configSpec).ParseHCL(t, "config "+c.config, &tc)
must.Eq(t, c.expected, tc.WindowsAllowInsecureContainerAdmin)
})
}
}
func TestConfig_DriverConfig_InfraImagePullTimeout(t *testing.T) {
ci.Parallel(t)


@@ -21,13 +21,14 @@ var (
imageNotFoundMatcher = regexp.MustCompile(`Error: image .+ not found`)
)
// pullFuture is a sharable future for retrieving a pulled images ID and user,
// and any error that may have occurred during the pull.
type pullFuture struct {
waitCh chan struct{}
err error
imageID string
imageUser string
}
// newPullFuture returns a new pull future
@@ -45,14 +46,15 @@ func (p *pullFuture) wait() *pullFuture {
// result returns the results of the future and should only ever be called after
// wait returns.
func (p *pullFuture) result() (imageID, imageUser string, err error) {
return p.imageID, p.imageUser, p.err
}
// set is used to set the results and unblock any waiter. This may only be
// called once.
func (p *pullFuture) set(imageID, imageUser string, err error) {
p.imageID = imageID
p.imageUser = imageUser
p.err = err
close(p.waitCh)
}
@@ -135,7 +137,7 @@ func newDockerCoordinator(config *dockerCoordinatorConfig) *dockerCoordinator {
// PullImage is used to pull an image. It returns the pulled imaged ID or an
// error that occurred during the pull
func (d *dockerCoordinator) PullImage(image string, authOptions *docker.AuthConfiguration, callerID string,
emitFn LogEventFn, pullTimeout, pullActivityTimeout time.Duration) (imageID, imageUser string, err error) {
// Get the future
d.imageLock.Lock()
future, ok := d.pullFutures[image]
@@ -149,7 +151,7 @@ func (d *dockerCoordinator) PullImage(image string, authOptions *docker.AuthConf
d.imageLock.Unlock()
// We unlock while we wait since this can take a while
id, user, err := future.wait().result()
d.imageLock.Lock()
defer d.imageLock.Unlock()
@@ -164,7 +166,7 @@ func (d *dockerCoordinator) PullImage(image string, authOptions *docker.AuthConf
d.incrementImageReferenceImpl(id, image, callerID)
}
return id, user, err
}
// pullImageImpl is the implementation of pulling an image. The results are
@@ -200,14 +202,14 @@ func (d *dockerCoordinator) pullImageImpl(image string, authOptions *docker.Auth
if ctxErr := ctx.Err(); ctxErr == context.DeadlineExceeded {
d.logger.Error("timeout pulling container", "image_ref", dockerImageRef(repo, tag))
future.set("", "", recoverablePullError(ctxErr, image))
return
}
if err != nil {
d.logger.Error("failed pulling container", "image_ref", dockerImageRef(repo, tag),
"error", err)
future.set("", "", recoverablePullError(err, image))
return
}
@@ -216,11 +218,16 @@ func (d *dockerCoordinator) pullImageImpl(image string, authOptions *docker.Auth
dockerImage, err := d.client.InspectImage(image)
if err != nil {
d.logger.Error("failed getting image id", "image_name", image, "error", err)
future.set("", "", recoverableErrTimeouts(err))
return
}
var imageUser string
if dockerImage.Config != nil {
imageUser = dockerImage.Config.User
}
future.set(dockerImage.ID, imageUser, err)
}
// IncrementImageReference is used to increment an image reference count


@@ -77,7 +77,7 @@ func TestDockerCoordinator_ConcurrentPulls(t *testing.T) {
// Create a coordinator
coordinator := newDockerCoordinator(config)
id, _, _ := coordinator.PullImage(image, nil, uuid.Generate(), nil, 5*time.Minute, 2*time.Minute)
for i := 0; i < 9; i++ {
go func() {
coordinator.PullImage(image, nil, uuid.Generate(), nil, 5*time.Minute, 2*time.Minute)
@@ -133,7 +133,7 @@ func TestDockerCoordinator_Pull_Remove(t *testing.T) {
callerIDs := make([]string, 10, 10)
for i := 0; i < 10; i++ {
callerIDs[i] = uuid.Generate()
id, _, _ = coordinator.PullImage(image, nil, callerIDs[i], nil, 5*time.Minute, 2*time.Minute)
}
// Check the reference count
@@ -203,7 +203,7 @@ func TestDockerCoordinator_Remove_Cancel(t *testing.T) {
callerID := uuid.Generate()
// Pull image
id, _, _ := coordinator.PullImage(image, nil, callerID, nil, 5*time.Minute, 2*time.Minute)
// Check the reference count
if references := coordinator.imageRefCount[id]; len(references) != 1 {
@@ -219,7 +219,7 @@ func TestDockerCoordinator_Remove_Cancel(t *testing.T) {
}
// Pull image again within delay
id, _, _ = coordinator.PullImage(image, nil, callerID, nil, 5*time.Minute, 2*time.Minute)
// Check the reference count
if references := coordinator.imageRefCount[id]; len(references) != 1 {
@@ -252,7 +252,7 @@ func TestDockerCoordinator_No_Cleanup(t *testing.T) {
callerID := uuid.Generate()
// Pull image
id, _, _ := coordinator.PullImage(image, nil, callerID, nil, 5*time.Minute, 2*time.Minute)
// Check the reference count
if references := coordinator.imageRefCount[id]; len(references) != 0 {
@@ -292,10 +292,10 @@ func TestDockerCoordinator_Cleanup_HonorsCtx(t *testing.T) {
callerID := uuid.Generate()
// Pull image
id1, _, _ := coordinator.PullImage(image1ID, nil, callerID, nil, 5*time.Minute, 2*time.Minute)
require.Len(t, coordinator.imageRefCount[id1], 1, "image reference count")
id2, _, _ := coordinator.PullImage(image2ID, nil, callerID, nil, 5*time.Minute, 2*time.Minute)
require.Len(t, coordinator.imageRefCount[id2], 1, "image reference count")
// remove one image, cancel ctx, remove second, and assert only first image is cleanedup


@@ -340,11 +340,16 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
return nil, nil, fmt.Errorf("Failed to create long operations docker client: %v", err)
}
id, user, err := d.createImage(cfg, &driverConfig, dockerClient)
if err != nil {
return nil, nil, err
}
// validate the image user (windows only)
if err := validateImageUser(user, cfg.User, &driverConfig, d.config); err != nil {
return nil, nil, err
}
if runtime.GOOS == "windows" {
err = d.convertAllocPathsForWindowsLCOW(cfg, driverConfig.Image)
if err != nil {
@@ -593,7 +598,7 @@ START:
// createImage creates a docker image either by pulling it from a registry or by
// loading it from the file system
func (d *Driver) createImage(task *drivers.TaskConfig, driverConfig *TaskConfig, client *docker.Client) (string, string, error) {
image := driverConfig.Image
repo, tag := parseDockerImage(image)
@@ -606,7 +611,11 @@ func (d *Driver) createImage(task *drivers.TaskConfig, driverConfig *TaskConfig,
if dockerImage, _ := client.InspectImage(image); dockerImage != nil {
// Image exists so just increment its reference count
d.coordinator.IncrementImageReference(dockerImage.ID, image, task.ID)
var user string
if dockerImage.Config != nil {
user = dockerImage.Config.User
}
return dockerImage.ID, user, nil
}
}
@@ -616,17 +625,17 @@ func (d *Driver) createImage(task *drivers.TaskConfig, driverConfig *TaskConfig,
}
// Download the image
return d.pullImage(task, driverConfig, repo, tag)
}
// pullImage creates an image by pulling it from a docker registry
func (d *Driver) pullImage(task *drivers.TaskConfig, driverConfig *TaskConfig, repo, tag string) (id, user string, err error) {
authOptions, err := d.resolveRegistryAuthentication(driverConfig, repo)
if err != nil {
if driverConfig.AuthSoftFail {
d.logger.Warn("Failed to find docker repo auth", "repo", repo, "error", err)
} else {
return "", "", fmt.Errorf("Failed to find docker auth for repo %q: %v", repo, err)
}
}
@@ -647,7 +656,7 @@ func (d *Driver) pullImage(task *drivers.TaskConfig, driverConfig *TaskConfig, c
pullDur, err := time.ParseDuration(driverConfig.ImagePullTimeout)
if err != nil {
return "", "", fmt.Errorf("Failed to parse image_pull_timeout: %v", err)
}
return d.coordinator.PullImage(driverConfig.Image, authOptions, task.ID, d.emitEventFunc(task), pullDur, d.config.pullActivityTimeoutDuration)
@@ -680,28 +689,32 @@ func (d *Driver) resolveRegistryAuthentication(driverConfig *TaskConfig, repo st
}
// loadImage creates an image by loading it from the file system
func (d *Driver) loadImage(task *drivers.TaskConfig, driverConfig *TaskConfig, client *docker.Client) (id string, user string, err error) {
archive := filepath.Join(task.TaskDir().LocalDir, driverConfig.LoadImage)
d.logger.Debug("loading image from disk", "archive", archive)
f, err := os.Open(archive)
if err != nil {
return "", "", fmt.Errorf("unable to open image archive: %v", err)
}
if err := client.LoadImage(docker.LoadImageOptions{InputStream: f}); err != nil {
return "", "", err
}
f.Close()
dockerImage, err := client.InspectImage(driverConfig.Image)
if err != nil {
return "", "", recoverableErrTimeouts(err)
}
d.coordinator.IncrementImageReference(dockerImage.ID, driverConfig.Image, task.ID)
var imageUser string
if dockerImage.Config != nil {
imageUser = dockerImage.Config.User
}
return dockerImage.ID, imageUser, nil
}
func (d *Driver) convertAllocPathsForWindowsLCOW(task *drivers.TaskConfig, image string) error {


@@ -12,3 +12,7 @@ import (
func getPortBinding(ip string, port string) docker.PortBinding {
return docker.PortBinding{HostIP: ip, HostPort: port}
}
func validateImageUser(imageUser, taskUser string, taskDriverConfig *TaskConfig, driverConfig *DriverConfig) error {
return nil
}


@@ -343,7 +343,7 @@ func TestDockerDriver_Start_StoppedContainer(t *testing.T) {
var err error
if runtime.GOOS != "windows" {
imageID, _, err = d.Impl().(*Driver).loadImage(task, &taskCfg, client)
} else {
image, lErr := client.InspectImage(taskCfg.Image)
err = lErr
@@ -398,7 +398,7 @@ func TestDockerDriver_ContainerAlreadyExists(t *testing.T) {
d, ok := driver.Impl().(*Driver)
must.True(t, ok)
_, _, err := d.createImage(task, cfg, client)
must.NoError(t, err)
containerCfg, err := d.createContainerConfig(task, cfg, cfg.Image)
@@ -2834,7 +2834,7 @@ func TestDockerDriver_CreationIdempotent(t *testing.T) {
d, ok := driver.Impl().(*Driver)
require.True(t, ok)
_, _, err := d.createImage(task, cfg, client)
require.NoError(t, err)
containerCfg, err := d.createContainerConfig(task, cfg, cfg.Image)


@@ -5,7 +5,11 @@
package docker
import (
"errors"
docker "github.com/fsouza/go-dockerclient"
)
// Currently Windows containers don't support host ip in port binding.
func getPortBinding(ip string, port string) docker.PortBinding {
@@ -15,3 +19,19 @@ func getPortBinding(ip string, port string) docker.PortBinding {
func tweakCapabilities(basics, adds, drops []string) ([]string, error) {
return nil, nil
}
var containerAdminErrMsg = "running container as ContainerAdmin is unsafe; change the container user, set task configuration to privileged or enable windows_allow_insecure_container_admin to disable this check"
func validateImageUser(user, taskUser string, taskDriverConfig *TaskConfig, driverConfig *DriverConfig) error {
// we're only interested in the case where isolation is set to "process"
// (it's also the default) and when windows_allow_insecure_container_admin
// is explicitly set to true in the config
if driverConfig.WindowsAllowInsecureContainerAdmin || taskDriverConfig.Isolation == "hyper-v" {
return nil
}
if user == "ContainerAdmin" && (taskUser == "ContainerAdmin" || taskUser == "") && !taskDriverConfig.Privileged {
return errors.New(containerAdminErrMsg)
}
return nil
}


@@ -8,8 +8,12 @@ package docker
import (
"testing"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/plugins/drivers"
"github.com/hashicorp/nomad/testutil"
"github.com/shoenig/test/must"
)
func newTaskConfig(variant string, command []string) TaskConfig {
@@ -28,3 +32,101 @@ func newTaskConfig(variant string, command []string) TaskConfig {
// No-op on windows because we don't load images.
func copyImage(t *testing.T, taskDir *allocdir.TaskDir, image string) {
}
func Test_validateImageUser(t *testing.T) {
ci.Parallel(t)
taskCfg := &drivers.TaskConfig{
ID: uuid.Generate(),
Name: "busybox-demo",
User: "nomadUser",
}
taskDriverCfg := newTaskConfig("", []string{"sh", "-c", "sleep 1"})
tests := []struct {
name string
taskUser string
containerUser string
privileged bool
isolation string
driverConfig *DriverConfig
wantErr bool
want string
}{
{
"normal user",
"nomadUser",
"nomadUser",
false,
"process",
&DriverConfig{},
false,
"",
},
{
"ContainerAdmin image user, non-priviliged",
"",
"ContainerAdmin",
false,
"process",
&DriverConfig{},
true,
containerAdminErrMsg,
},
{
"ContainerAdmin image user, non-priviliged, but hyper-v",
"",
"ContainerAdmin",
false,
"hyper-v",
&DriverConfig{},
false,
"",
},
{
"ContainerAdmin task user, non-priviliged",
"",
"ContainerAdmin",
false,
"process",
&DriverConfig{},
true,
containerAdminErrMsg,
},
{
"ContainerAdmin image user, non-priviliged, but overriden by task user",
"ContainerUser",
"ContainerAdmin",
false,
"process",
&DriverConfig{},
false,
"",
},
{
"ContainerAdmin image user, non-priviliged, but overriden by windows_allow_insecure_container_admin",
"ContainerAdmin",
"ContainerAdmin",
false,
"process",
&DriverConfig{WindowsAllowInsecureContainerAdmin: true},
false,
"",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
taskCfg.User = tt.taskUser
taskDriverCfg.Privileged = tt.privileged
taskDriverCfg.Isolation = tt.isolation
err := validateImageUser(tt.containerUser, tt.taskUser, &taskDriverCfg, tt.driverConfig)
if tt.wantErr {
must.Error(t, err)
must.Eq(t, tt.want, containerAdminErrMsg)
} else {
must.NoError(t, err)
}
})
}
}


@@ -226,6 +226,6 @@ func (d *Driver) pullInfraImage(allocID string) error {
d.logger.Debug("auth failed for infra_image container pull", "image", d.config.InfraImage, "error", err) d.logger.Debug("auth failed for infra_image container pull", "image", d.config.InfraImage, "error", err)
} }
_, err = d.coordinator.PullImage(d.config.InfraImage, authOptions, allocID, noopLogEventFn, d.config.infraImagePullTimeoutDuration, d.config.pullActivityTimeoutDuration) _, _, err = d.coordinator.PullImage(d.config.InfraImage, authOptions, allocID, noopLogEventFn, d.config.infraImagePullTimeoutDuration, d.config.pullActivityTimeoutDuration)
return err return err
} }


@@ -1003,6 +1003,11 @@ host system.
wait before cancelling an in-progress pull of the Docker image as specified in
`infra_image`. Defaults to `"5m"`.
- `windows_allow_insecure_container_admin` - On Windows, Docker checks the
`task.user` field or, if unset, the container image manifest after pulling the
image, to see whether the container would run as `ContainerAdmin`. If so, the
task fails with an error unless its config sets `privileged=true`. Set this
option to `true` to skip the check. Defaults to `false`.
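For reference, a minimal sketch of where this option lives in the agent's plugin configuration; the surrounding `plugin "docker" { config { ... } }` wrapper is the standard Docker driver plugin block and is assumed here rather than shown in this diff:

```hcl
plugin "docker" {
  config {
    # Skip the ContainerAdmin check added in this change. Prefer changing the
    # image user or setting privileged=true on individual tasks where possible.
    windows_allow_insecure_container_admin = true
  }
}
```

Leaving the option at its default of `false` keeps the check active, which is the safer posture.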
## Client Configuration
~> Note: client configuration options will soon be deprecated. Please use


@@ -13,7 +13,17 @@ upgrade. However, specific versions of Nomad may have more details provided for
their upgrades as a result of new features or changed behavior. This page is
used to document those details separately from the standard upgrade flow.
## Nomad 1.8.2 (UNRELEASED)
#### New `windows_allow_insecure_container_admin` configuration option for Docker driver
In 1.8.2, Nomad will refuse to run Docker driver jobs on Windows that use
[Process Isolation][] and run as `ContainerAdmin`, in order to provide a more
secure environment for these jobs. This behavior can be overridden by setting
the new `windows_allow_insecure_container_admin` Docker plugin configuration
option to `true`, or by setting `privileged=true` in the task configuration.
## Nomad 1.8.1
<EnterpriseAlert inline />
@@ -2067,3 +2077,4 @@ deleted and then Nomad 0.3.0 can be launched.
[validate]: /nomad/docs/commands/job/validate
[vault_grace]: /nomad/docs/job-specification/template
[Workload Identity]: /nomad/docs/concepts/workload-identity
[Process Isolation]: https://learn.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container#process-isolation