driver/docker: enable setting hard/soft memory limits
Fixes #2093. Enables configuring `memory_hard_limit` in the docker config stanza for tasks. If set, this value is passed to the container runtime as `--memory`, and the `memory` value from the task resource configuration is passed as `--memory-reservation`, creating hard and soft memory limits for tasks using the docker task driver.
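A minimal jobspec sketch of the new option (job, group, and task names are illustrative; both values are in MB, like the task resources `memory` field):

job "example" {
  group "cache" {
    task "redis" {
      driver = "docker"

      config {
        image             = "redis:6"
        memory_hard_limit = 512 # hard limit, passed to docker as --memory
      }

      resources {
        memory = 256 # becomes the soft limit (--memory-reservation) once memory_hard_limit is set
      }
    }
  }
}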
@@ -319,7 +319,8 @@ var (
 			"driver": hclspec.NewAttr("driver", "string", false),
 			"config": hclspec.NewAttr("config", "list(map(string))", false),
 		})),
 		"mac_address": hclspec.NewAttr("mac_address", "string", false),
+		"memory_hard_limit": hclspec.NewAttr("memory_hard_limit", "number", false),
 		"mounts": hclspec.NewBlockList("mounts", hclspec.NewObject(map[string]*hclspec.Spec{
 			"type": hclspec.NewDefault(
 				hclspec.NewAttr("type", "string", false),
@@ -408,6 +409,7 @@ type TaskConfig struct {
 	LoadImage       string        `codec:"load"`
 	Logging         DockerLogging `codec:"logging"`
 	MacAddress      string        `codec:"mac_address"`
+	MemoryHardLimit int64         `codec:"memory_hard_limit"`
 	Mounts          []DockerMount `codec:"mounts"`
 	NetworkAliases  []string      `codec:"network_aliases"`
 	NetworkMode     string        `codec:"network_mode"`
@@ -222,6 +222,7 @@ config {
 		}
 	}
 	mac_address = "02:42:ac:11:00:02"
+	memory_hard_limit = 512
 	mounts = [
 		{
 			type = "bind"
@@ -349,7 +350,8 @@ config {
 			"max-file": "3",
 			"max-size": "10m",
 		}},
 		MacAddress: "02:42:ac:11:00:02",
+		MemoryHardLimit: 512,
 		Mounts: []DockerMount{
 			{
 				Type: "bind",
@@ -524,7 +526,6 @@ func TestConfig_InternalCapabilities(t *testing.T) {
 			require.Equal(t, c.expected, d.InternalCapabilities())
 		})
 	}
-
 }
 
 func TestConfig_DriverConfig_PullActivityTimeout(t *testing.T) {
@@ -582,5 +583,4 @@ func TestConfig_DriverConfig_AllowRuntimes(t *testing.T) {
 			require.Equal(t, c.expected, d.config.allowRuntimes)
 		})
 	}
-
 }
@@ -724,6 +724,30 @@ func parseSecurityOpts(securityOpts []string) ([]string, error) {
 	return securityOpts, nil
 }
 
+// memoryLimits computes the memory and memory_reservation values passed along to
+// the docker host config. These fields represent hard and soft memory limits from
+// docker's perspective, respectively.
+//
+// The memory field on the task configuration can be interpreted as a hard or soft
+// limit. Before Nomad v0.11.3, it was always a hard limit. Now, it is interpreted
+// as a soft limit if the memory_hard_limit value is configured on the docker
+// task driver configuration. When memory_hard_limit is set, the docker host
+// config is configured such that the memory field is equal to the memory_hard_limit
+// value, and the memory_reservation field is set to the task driver memory value.
+//
+// If memory_hard_limit is not set (i.e. zero value), then the memory field of
+// the task resource config is interpreted as a hard limit. In this case the
+// memory field is set to the task resource memory value and memory_reservation
+// is left unset.
+//
+// Returns (memory (hard), memory_reservation (soft)) values in bytes.
+func (Driver) memoryLimits(driverHardLimitMB, taskMemoryLimitBytes int64) (int64, int64) {
+	if driverHardLimitMB <= 0 {
+		return taskMemoryLimitBytes, 0
+	}
+	return driverHardLimitMB * 1024 * 1024, taskMemoryLimitBytes
+}
+
 func (d *Driver) createContainerConfig(task *drivers.TaskConfig, driverConfig *TaskConfig,
 	imageID string) (docker.CreateContainerOptions, error) {
 
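As a quick sanity check of the conversion above (a standalone sketch, not part of this commit): with `memory_hard_limit = 512` and a 256 MB task memory limit, docker receives a 512 MiB hard limit and a 256 MiB reservation.

package main

import "fmt"

// memoryLimits mirrors the helper added above: a driver-level hard limit in MB
// plus the task memory limit in bytes yield docker's hard and soft limits in bytes.
func memoryLimits(driverHardLimitMB, taskMemoryLimitBytes int64) (int64, int64) {
	if driverHardLimitMB <= 0 {
		// No driver hard limit: the task memory is the hard limit, no reservation.
		return taskMemoryLimitBytes, 0
	}
	// Driver hard limit set: it becomes --memory, task memory becomes --memory-reservation.
	return driverHardLimitMB * 1024 * 1024, taskMemoryLimitBytes
}

func main() {
	hard, soft := memoryLimits(512, 256*1024*1024)
	fmt.Println(hard, soft) // prints: 536870912 268435456
}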
@@ -772,8 +796,12 @@ func (d *Driver) createContainerConfig(task *drivers.TaskConfig, driverConfig *T
 		return c, fmt.Errorf("requested runtime %q is not allowed", containerRuntime)
 	}
 
+	memory, memoryReservation := d.memoryLimits(driverConfig.MemoryHardLimit, task.Resources.LinuxResources.MemoryLimitBytes)
+
 	hostConfig := &docker.HostConfig{
-		Memory: task.Resources.LinuxResources.MemoryLimitBytes,
+		Memory:            memory,            // hard limit
+		MemoryReservation: memoryReservation, // soft limit
+
 		CPUShares: task.Resources.LinuxResources.CPUShares,
 
 		// Binds are used to mount a host volume into the container. We mount a
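For reference, the `Memory` and `MemoryReservation` fields on the docker host config correspond to the `--memory` and `--memory-reservation` flags on `docker run`, so a task with `memory = 256` and `memory_hard_limit = 512` behaves roughly as if started with `--memory=512m --memory-reservation=256m`.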
@@ -837,7 +865,8 @@ func (d *Driver) createContainerConfig(task *drivers.TaskConfig, driverConfig *T
 		}
 	}
 
-	logger.Debug("configured resources", "memory", hostConfig.Memory,
+	logger.Debug("configured resources",
+		"memory", hostConfig.Memory, "memory_reservation", hostConfig.MemoryReservation,
 		"cpu_shares", hostConfig.CPUShares, "cpu_quota", hostConfig.CPUQuota,
 		"cpu_period", hostConfig.CPUPeriod)
 
@@ -2681,3 +2681,19 @@ func TestDockerDriver_CreateContainerConfig_CPUHardLimit(t *testing.T) {
 	require.NotZero(t, c.HostConfig.CPUQuota)
 	require.NotZero(t, c.HostConfig.CPUPeriod)
 }
+
+func TestDockerDriver_memoryLimits(t *testing.T) {
+	t.Parallel()
+
+	t.Run("driver hard limit not set", func(t *testing.T) {
+		memory, memoryReservation := new(Driver).memoryLimits(0, 256*1024*1024)
+		require.Equal(t, int64(256*1024*1024), memory)
+		require.Equal(t, int64(0), memoryReservation)
+	})
+
+	t.Run("driver hard limit is set", func(t *testing.T) {
+		memory, memoryReservation := new(Driver).memoryLimits(512, 256*1024*1024)
+		require.Equal(t, int64(512*1024*1024), memory)
+		require.Equal(t, int64(256*1024*1024), memoryReservation)
+	})
+}