nomad/drivers/docker/driver_linux_test.go
Tim Gross c8dcd3c2db docker: clamp CPU shares to minimum of 2 (#26081)
In #25963 we added normalization of CPU shares for large hosts whose total
compute exceeds the maximum CPU shares. But if the result after normalization
is less than 2, runc hits an integer overflow. We already prevent this in the
shared executor for the `exec`/`rawexec` drivers by clamping to the safe
minimum value. Do the same for the `docker` driver, and add test coverage for
the shared executor too.

Fixes: https://github.com/hashicorp/nomad/issues/26080
Ref: https://github.com/hashicorp/nomad/pull/25963
2025-06-19 13:48:06 -04:00
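
The fix reduces to a normalize-then-clamp step. Here is a minimal sketch of that behavior, assuming the scaling formula and constant values implied by the `TestDockerDriver_NormalizeCPUShares` assertions below; in the driver itself this logic lives in the `cpuResources` method on `*Driver`, so the free function and the `minCPUShares` name here are illustrative only.

// Sketch only: formula and constants are inferred from the test assertions
// below, not copied from the driver.
const (
	maxCPUShares = 262144 // Linux kernel maximum for cpu.shares
	minCPUShares = 2      // below this, runc hits an integer overflow (#26080)
)

// cpuShares scales task shares down when the host's total compute exceeds
// the kernel maximum, then clamps the result to the safe minimum.
func cpuShares(shares, totalCompute int64) int64 {
	if totalCompute > maxCPUShares {
		shares = shares * maxCPUShares / totalCompute
	}
	if shares < minCPUShares {
		return minCPUShares
	}
	return shares
}

For example, with totalCompute = maxCPUShares+1, cpuShares(1) scales to 0 and is clamped to 2, which is exactly what the clamp assertions at the end of the test below expect.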

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

//go:build linux

package docker

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/testutil"
	"github.com/hashicorp/nomad/helper/pointer"
	"github.com/shoenig/test/must"
	"github.com/shoenig/test/wait"
)
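
// TestDockerDriver_authFromHelper writes a fake docker-credential-testnomad
// script onto $PATH and verifies that authFromHelper invokes it with the
// "get" action, passes only the registry host (not the repo path) on stdin,
// and parses the JSON credentials the helper prints on stdout.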
func TestDockerDriver_authFromHelper(t *testing.T) {
	testutil.DockerCompatible(t)

	dir := t.TempDir()
	helperPayload := `{"Username":"hashi","Secret":"nomad"}`
	helperContent := []byte(fmt.Sprintf("#!/bin/sh\ncat > %s/helper-$1.out;echo '%s'", dir, helperPayload))

	helperFile := filepath.Join(dir, "docker-credential-testnomad")
	err := os.WriteFile(helperFile, helperContent, 0777)
	must.NoError(t, err)

	path := os.Getenv("PATH")
	t.Setenv("PATH", fmt.Sprintf("%s:%s", path, dir))

	authHelper := authFromHelper("testnomad")
	creds, err := authHelper("registry.local:5000/repo/image")
	must.NoError(t, err)
	must.NotNil(t, creds)
	must.Eq(t, "hashi", creds.Username)
	must.Eq(t, "nomad", creds.Password)

	if _, err := os.Stat(filepath.Join(dir, "helper-get.out")); os.IsNotExist(err) {
		t.Fatalf("expected helper-get.out to exist")
	}

	content, err := os.ReadFile(filepath.Join(dir, "helper-get.out"))
	must.NoError(t, err)
	must.Eq(t, "registry.local:5000", string(content))
}
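
// TestDockerDriver_PluginConfig_PidsLimit checks the interaction between the
// plugin-level and task-level pids_limit settings: a task limit higher than
// the plugin limit is rejected, while a lower task limit is applied.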
func TestDockerDriver_PluginConfig_PidsLimit(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)
	driver.config.PidsLimit = 5

	task, cfg, _ := dockerTask(t)
	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	// A task limit above the plugin limit must be rejected.
	cfg.PidsLimit = 7
	_, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
	must.Error(t, err)
	must.StrContains(t, err.Error(), `pids_limit cannot be greater than nomad plugin config pids_limit`)

	// A task limit below the plugin limit is allowed and takes effect.
	cfg.PidsLimit = 3
	opts, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
	must.NoError(t, err)
	must.Eq(t, pointer.Of(int64(3)), opts.Host.PidsLimit)
}
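
// TestDockerDriver_PidsLimit runs a task with pids_limit = 1 and a command
// that tries to fork, then asserts that the shell exits non-zero and that
// the "can't fork" error shows up in the task's stderr log.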
func TestDockerDriver_PidsLimit(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	task, cfg, _ := dockerTask(t)
	cfg.PidsLimit = 1
	cfg.Command = "/bin/sh"
	cfg.Args = []string{"-c", "sleep 5 & sleep 5 & sleep 5"}
	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	_, _, handle, cleanup := dockerSetup(t, task, nil)
	t.Cleanup(cleanup)

	ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
	defer cancel()

	select {
	case <-handle.waitCh:
		must.Eq(t, 2, handle.exitResult.ExitCode)
	case <-ctx.Done():
		t.Fatalf("task should have immediately completed")
	}

	// Check that the fork failure was written to the task's stderr log.
	outputFile := filepath.Join(task.TaskDir().LogDir, "redis-demo.stderr.0")
	exp := "can't fork"
	must.Wait(t, wait.InitialSuccess(wait.ErrorFunc(func() error {
		act, err := os.ReadFile(outputFile)
		if err != nil {
			return err
		}
		if !strings.Contains(string(act), exp) {
			return fmt.Errorf("expected %q in output %q", exp, string(act))
		}
		return nil
	}),
		wait.Timeout(5*time.Second),
		wait.Gap(50*time.Millisecond),
	))
}
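
// TestDockerDriver_NormalizeCPUShares covers the normalization added in
// #25963 and the minimum-shares clamp added in #26081: shares pass through
// unchanged until the host's total compute exceeds maxCPUShares, are scaled
// down proportionally above it, and never drop below the safe minimum of 2.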
func TestDockerDriver_NormalizeCPUShares(t *testing.T) {
	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)

	// Total compute below the maximum: shares pass through unchanged.
	driver.compute.TotalCompute = 12000
	must.Eq(t, maxCPUShares, driver.cpuResources(maxCPUShares))
	must.Eq(t, 1000, driver.cpuResources(1000))

	// Total compute exactly at the maximum: still no scaling.
	driver.compute.TotalCompute = maxCPUShares
	must.Eq(t, maxCPUShares, driver.cpuResources(maxCPUShares))

	// Total compute just over the maximum: shares are scaled down.
	driver.compute.TotalCompute = maxCPUShares + 1
	must.Eq(t, 262143, driver.cpuResources(maxCPUShares))

	// Values that would scale to less than 2 are clamped to 2.
	driver.compute.TotalCompute = maxCPUShares + 1
	must.Eq(t, 2, driver.cpuResources(2))

	driver.compute.TotalCompute = maxCPUShares + 1
	must.Eq(t, 2, driver.cpuResources(1))

	// Total compute at twice the maximum: shares are halved.
	driver.compute.TotalCompute = maxCPUShares * 2
	must.Eq(t, 500, driver.cpuResources(1000))
	must.Eq(t, maxCPUShares/2, driver.cpuResources(maxCPUShares))
}