driver/docker: add extra labels (job name, task and task group name)
@@ -980,7 +980,11 @@ func (tr *TaskRunner) buildTaskConfig() *drivers.TaskConfig {
 		ID:            fmt.Sprintf("%s/%s/%s", alloc.ID, task.Name, invocationid),
 		Name:          task.Name,
 		JobName:       alloc.Job.Name,
+		JobID:         alloc.Job.ID,
 		TaskGroupName: alloc.TaskGroup,
+		Namespace:     alloc.Namespace,
+		NodeName:      alloc.NodeName,
+		NodeID:        alloc.NodeID,
 		Resources: &drivers.Resources{
 			NomadResources: taskResources,
 			LinuxResources: &drivers.LinuxResources{
@@ -203,6 +203,9 @@ var (
 			"ca":   hclspec.NewAttr("ca", "string", false),
 		})),

+		// extra docker labels, globs supported
+		"extra_labels": hclspec.NewAttr("extra_labels", "list(string)", false),
+
 		// garbage collection options
 		// default needed for both if the gc {...} block is not set and
 		// if the default fields are missing
@@ -612,6 +615,7 @@ type DriverConfig struct {
 	DisableLogCollection        bool          `codec:"disable_log_collection"`
 	PullActivityTimeout         string        `codec:"pull_activity_timeout"`
 	pullActivityTimeoutDuration time.Duration `codec:"-"`
+	ExtraLabels                 []string      `codec:"extra_labels"`

 	AllowRuntimesList []string            `codec:"allow_runtimes"`
 	allowRuntimes     map[string]struct{} `codec:"-"`
@@ -28,6 +28,7 @@ import (
 	"github.com/hashicorp/nomad/plugins/base"
 	"github.com/hashicorp/nomad/plugins/drivers"
 	pstructs "github.com/hashicorp/nomad/plugins/shared/structs"
+	"github.com/ryanuber/go-glob"
 )

 var (
@@ -70,7 +71,14 @@ var (
 )

 const (
-	dockerLabelAllocID = "com.hashicorp.nomad.alloc_id"
+	dockerLabelAllocID       = "com.hashicorp.nomad.alloc_id"
+	dockerLabelJobName       = "com.hashicorp.nomad.job_name"
+	dockerLabelJobID         = "com.hashicorp.nomad.job_id"
+	dockerLabelTaskGroupName = "com.hashicorp.nomad.task_group_name"
+	dockerLabelTaskName      = "com.hashicorp.nomad.task_name"
+	dockerLabelNamespace     = "com.hashicorp.nomad.namespace"
+	dockerLabelNodeName      = "com.hashicorp.nomad.node_name"
+	dockerLabelNodeID        = "com.hashicorp.nomad.node_id"
 )

 type Driver struct {
@@ -1114,7 +1122,34 @@ func (d *Driver) createContainerConfig(task *drivers.TaskConfig, driverConfig *T
 	for k, v := range driverConfig.Labels {
 		labels[k] = v
 	}
+	// main mandatory label
 	labels[dockerLabelAllocID] = task.AllocID

+	//optional labels, as configured in plugin configuration
+	for _, configurationExtraLabel := range d.config.ExtraLabels {
+		if glob.Glob(configurationExtraLabel, "job_name") {
+			labels[dockerLabelJobName] = task.JobName
+		}
+		if glob.Glob(configurationExtraLabel, "job_id") {
+			labels[dockerLabelJobID] = task.JobID
+		}
+		if glob.Glob(configurationExtraLabel, "task_group_name") {
+			labels[dockerLabelTaskGroupName] = task.TaskGroupName
+		}
+		if glob.Glob(configurationExtraLabel, "task_name") {
+			labels[dockerLabelTaskName] = task.Name
+		}
+		if glob.Glob(configurationExtraLabel, "namespace") {
+			labels[dockerLabelNamespace] = task.Namespace
+		}
+		if glob.Glob(configurationExtraLabel, "node_name") {
+			labels[dockerLabelNodeName] = task.NodeName
+		}
+		if glob.Glob(configurationExtraLabel, "node_id") {
+			labels[dockerLabelNodeID] = task.NodeID
+		}
+	}
+
 	config.Labels = labels
+	logger.Debug("applied labels on the container", "labels", config.Labels)

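One subtlety in the loop above: glob.Glob from github.com/ryanuber/go-glob takes the pattern first and the subject second, so each configured extra_labels entry acts as the pattern and the fixed option names ("job_name", "task_name", ...) are the subjects being matched. Below is a minimal, self-contained sketch of that selection logic; the option-to-label map and the main harness are illustrative only, not driver code.

package main

import (
	"fmt"

	"github.com/ryanuber/go-glob"
)

// Option names matched against extra_labels entries, mapped to the
// container label keys they enable (same set as in the diff above).
var extraLabelOptions = map[string]string{
	"job_name":        "com.hashicorp.nomad.job_name",
	"job_id":          "com.hashicorp.nomad.job_id",
	"task_group_name": "com.hashicorp.nomad.task_group_name",
	"task_name":       "com.hashicorp.nomad.task_name",
	"namespace":       "com.hashicorp.nomad.namespace",
	"node_name":       "com.hashicorp.nomad.node_name",
	"node_id":         "com.hashicorp.nomad.node_id",
}

func main() {
	// Same configuration as the new TestDockerDriver_ExtraLabels test.
	configured := []string{"task*", "job_name"}

	for _, pattern := range configured {
		for option, labelKey := range extraLabelOptions {
			// go-glob takes the pattern first, then the subject.
			if glob.Glob(pattern, option) {
				fmt.Printf("%q enables %s\n", pattern, labelKey)
			}
		}
	}
}

With {"task*", "job_name"} this selects task_name, task_group_name, and job_name; together with the unconditional alloc_id label, that makes the four labels the new test asserts.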
@@ -799,13 +799,50 @@ func TestDockerDriver_Labels(t *testing.T) {
 		t.Fatalf("err: %v", err)
 	}

-	// expect to see 1 additional standard labels
+	// expect to see 1 additional standard labels (allocID)
 	require.Equal(t, len(cfg.Labels)+1, len(container.Config.Labels))
 	for k, v := range cfg.Labels {
 		require.Equal(t, v, container.Config.Labels[k])
 	}
 }

+func TestDockerDriver_ExtraLabels(t *testing.T) {
+	if !tu.IsCI() {
+		t.Parallel()
+	}
+	testutil.DockerCompatible(t)
+
+	task, cfg, ports := dockerTask(t)
+	defer freeport.Return(ports)
+
+	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+
+	dockerClientConfig := make(map[string]interface{})
+
+	dockerClientConfig["extra_labels"] = []string{"task*", "job_name"}
+	client, d, handle, cleanup := dockerSetup(t, task, dockerClientConfig)
+	defer cleanup()
+	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
+
+	container, err := client.InspectContainer(handle.containerID)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	expectedLabels := map[string]string{
+		"com.hashicorp.nomad.alloc_id":        task.AllocID,
+		"com.hashicorp.nomad.task_name":       task.Name,
+		"com.hashicorp.nomad.task_group_name": task.TaskGroupName,
+		"com.hashicorp.nomad.job_name":        task.JobName,
+	}
+
+	// expect to see 4 labels (allocID by default, task_name and task_group_name due to task*, and job_name)
+	require.Equal(t, 4, len(container.Config.Labels))
+	for k, v := range expectedLabels {
+		require.Equal(t, v, container.Config.Labels[k])
+	}
+}
+
 func TestDockerDriver_ForcePull(t *testing.T) {
 	if !tu.IsCI() {
 		t.Parallel()
@@ -1065,7 +1102,7 @@ func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) {
 	expectedLabels := map[string]string{
 		// user provided labels
 		"user_label": "user_value",
-		// default labels
+		// default label
 		"com.hashicorp.nomad.alloc_id": task.AllocID,
 	}

@@ -237,8 +237,12 @@ func (c *DNSConfig) Copy() *DNSConfig {
 type TaskConfig struct {
 	ID            string
 	JobName       string
+	JobID         string
 	TaskGroupName string
 	Name          string
+	Namespace     string
+	NodeName      string
+	NodeID        string
 	Env           map[string]string
 	DeviceEnv     map[string]string
 	Resources     *Resources
@@ -774,6 +774,8 @@ plugin "docker" {
     ca   = "/etc/nomad/nomad.cert"
   }

+  extra_labels = ["job_name", "job_id", "task_group_name", "task_name", "namespace", "node_name", "node_id"]
+
   gc {
     image       = true
     image_delay = "3m"
@@ -866,6 +868,10 @@ plugin "docker" {
   capabilities and exclusively use host based log aggregation, you may consider
   this option to disable nomad log collection overhead.

+- `extra_labels` - Extra labels to add to Docker containers.
+  Available options are `job_name`, `job_id`, `task_group_name`, `task_name`,
+  `namespace`, `node_name`, `node_id`. Globs are supported (e.g. `task*`)
+
 - `gc` stanza:

 - `image` - Defaults to `true`. Changing this to `false` will prevent Nomad
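As a usage sketch for the option documented above: with extra_labels = ["job_name", "namespace"] in the plugin block, every container started by the driver carries the always-present alloc_id label plus the two opted-in labels. The snippet below only illustrates the expected result; the job name, namespace, and placeholder alloc ID are hypothetical values.

package main

import "fmt"

func main() {
	// Expected labels for a container of a hypothetical job "cache" in the
	// "default" namespace, given extra_labels = ["job_name", "namespace"].
	containerLabels := map[string]string{
		"com.hashicorp.nomad.alloc_id":  "<alloc-id>", // always set by the driver
		"com.hashicorp.nomad.job_name":  "cache",      // opted in via extra_labels
		"com.hashicorp.nomad.namespace": "default",    // opted in via extra_labels
	}
	fmt.Println(containerLabels)
}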