diff --git a/.changelog/26681.txt b/.changelog/26681.txt new file mode 100644 index 000000000..88a9cd5e3 --- /dev/null +++ b/.changelog/26681.txt @@ -0,0 +1,3 @@ +```release-note:feature +secrets: Adds secret block for fetching and interpolating secrets in job spec +``` diff --git a/api/tasks.go b/api/tasks.go index db00931d3..cbc6fef1a 100644 --- a/api/tasks.go +++ b/api/tasks.go @@ -786,6 +786,7 @@ type Task struct { KillSignal string `mapstructure:"kill_signal" hcl:"kill_signal,optional"` Kind string `hcl:"kind,optional"` ScalingPolicies []*ScalingPolicy `hcl:"scaling,block"` + Secrets []*Secret `hcl:"secret,block"` // Identity is the default Nomad Workload Identity and will be added to // Identities with the name "default" @@ -825,6 +826,9 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) { for _, tmpl := range t.Templates { tmpl.Canonicalize() } + for _, s := range t.Secrets { + s.Canonicalize() + } for _, s := range t.Services { s.Canonicalize(t, tg, job) } @@ -1042,6 +1046,24 @@ func (v *Vault) Canonicalize() { } } +type Secret struct { + Name string `hcl:"name,label"` + Provider string `hcl:"provider,optional"` + Path string `hcl:"path,optional"` + Config map[string]any `hcl:"config,block"` + Env map[string]string `hcl:"env,block"` +} + +func (s *Secret) Canonicalize() { + if len(s.Config) == 0 { + s.Config = nil + } + + if len(s.Env) == 0 { + s.Env = nil + } +} + // NewTask creates and initializes a new Task. func NewTask(name, driver string) *Task { return &Task{ diff --git a/api/tasks_test.go b/api/tasks_test.go index 392dfc67a..2cc5c6c14 100644 --- a/api/tasks_test.go +++ b/api/tasks_test.go @@ -506,6 +506,29 @@ func TestTask_Canonicalize_Vault(t *testing.T) { } } +func TestTask_Canonicalize_Secret(t *testing.T) { + testutil.Parallel(t) + + testSecret := &Secret{ + Name: "test-secret", + Provider: "test-provider", + Path: "/test/path", + Config: make(map[string]any), + Env: make(map[string]string), + } + + expected := &Secret{ + Name: "test-secret", + Provider: "test-provider", + Path: "/test/path", + Config: nil, + Env: nil, + } + + testSecret.Canonicalize() + must.Eq(t, expected, testSecret) +} + // Ensures no regression on https://github.com/hashicorp/nomad/issues/3132 func TestTaskGroup_Canonicalize_Update(t *testing.T) { testutil.Parallel(t) diff --git a/ci/test-core.json b/ci/test-core.json index d117e2a0b..7bb97c929 100644 --- a/ci/test-core.json +++ b/ci/test-core.json @@ -11,6 +11,7 @@ "client/allocdir/...", "client/allochealth/...", "client/allocwatcher/...", + "client/commonplugins/...", "client/config/...", "client/consul/...", "client/devicemanager/...", diff --git a/client/allocrunner/taskrunner/artifact_hook_test.go b/client/allocrunner/taskrunner/artifact_hook_test.go index 3e2ee1891..f65fc78b0 100644 --- a/client/allocrunner/taskrunner/artifact_hook_test.go +++ b/client/allocrunner/taskrunner/artifact_hook_test.go @@ -88,7 +88,7 @@ func TestTaskRunner_ArtifactHook_PartialDone(t *testing.T) { _, destdir := getter.SetupDir(t) req := &interfaces.TaskPrestartRequest{ - TaskEnv: taskenv.NewTaskEnv(nil, nil, nil, nil, destdir, ""), + TaskEnv: taskenv.NewTaskEnv(nil, nil, nil, nil, nil, destdir, ""), TaskDir: &allocdir.TaskDir{Dir: destdir}, Task: &structs.Task{ Artifacts: []*structs.TaskArtifact{ @@ -180,7 +180,7 @@ func TestTaskRunner_ArtifactHook_ConcurrentDownloadSuccess(t *testing.T) { _, destdir := getter.SetupDir(t) req := &interfaces.TaskPrestartRequest{ - TaskEnv: taskenv.NewTaskEnv(nil, nil, nil, nil, destdir, ""), + TaskEnv: taskenv.NewTaskEnv(nil, 
nil, nil, nil, nil, destdir, ""), TaskDir: &allocdir.TaskDir{Dir: destdir}, Task: &structs.Task{ Artifacts: []*structs.TaskArtifact{ @@ -271,7 +271,7 @@ func TestTaskRunner_ArtifactHook_ConcurrentDownloadFailure(t *testing.T) { _, destdir := getter.SetupDir(t) req := &interfaces.TaskPrestartRequest{ - TaskEnv: taskenv.NewTaskEnv(nil, nil, nil, nil, destdir, ""), + TaskEnv: taskenv.NewTaskEnv(nil, nil, nil, nil, nil, destdir, ""), TaskDir: &allocdir.TaskDir{Dir: destdir}, Task: &structs.Task{ Artifacts: []*structs.TaskArtifact{ diff --git a/client/allocrunner/taskrunner/envoy_version_hook_test.go b/client/allocrunner/taskrunner/envoy_version_hook_test.go index 871f65ded..2b0c8b49a 100644 --- a/client/allocrunner/taskrunner/envoy_version_hook_test.go +++ b/client/allocrunner/taskrunner/envoy_version_hook_test.go @@ -26,7 +26,7 @@ var ( taskEnvDefault = taskenv.NewTaskEnv(nil, nil, nil, map[string]string{ "meta.connect.sidecar_image": envoy.ImageFormat, "meta.connect.gateway_image": envoy.ImageFormat, - }, "", "") + }, nil, "", "") ) func TestEnvoyVersionHook_semver(t *testing.T) { @@ -147,7 +147,7 @@ func TestEnvoyVersionHook_interpolateImage(t *testing.T) { "MY_ENVOY": "my/envoy", }, map[string]string{ "MY_ENVOY": "my/envoy", - }, nil, nil, "", "")) + }, nil, nil, nil, "", "")) must.Eq(t, "my/envoy", task.Config["image"]) }) diff --git a/client/allocrunner/taskrunner/secrets/nomad_provider.go b/client/allocrunner/taskrunner/secrets/nomad_provider.go new file mode 100644 index 000000000..056e3de57 --- /dev/null +++ b/client/allocrunner/taskrunner/secrets/nomad_provider.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package secrets + +import ( + "errors" + "fmt" + "path/filepath" + "strings" + + "github.com/hashicorp/nomad/nomad/structs" + "github.com/mitchellh/mapstructure" +) + +const SecretProviderNomad = "nomad" + +type nomadProviderConfig struct { + Namespace string `mapstructure:"namespace"` +} + +func defaultNomadConfig(namespace string) *nomadProviderConfig { + return &nomadProviderConfig{ + Namespace: namespace, + } +} + +type NomadProvider struct { + secret *structs.Secret + secretDir string + tmplFile string + config *nomadProviderConfig +} + +// NewNomadProvider takes a task secret and decodes the config, overwriting the default config fields +// with any provided fields, returning an error if the secret or secret's config is invalid. +func NewNomadProvider(secret *structs.Secret, secretDir string, tmplFile string, namespace string) (*NomadProvider, error) { + conf := defaultNomadConfig(namespace) + if err := mapstructure.Decode(secret.Config, conf); err != nil { + return nil, err + } + + if err := validateNomadInputs(conf, secret.Path); err != nil { + return nil, err + } + + return &NomadProvider{ + config: conf, + secret: secret, + secretDir: secretDir, + tmplFile: tmplFile, + }, nil +} + +func (n *NomadProvider) BuildTemplate() *structs.Template { + data := fmt.Sprintf(` + {{ with nomadVar "%s@%s" }} + {{ range $k, $v := . }} + secret.%s.{{ $k }}={{ $v }} + {{ end }} + {{ end }}`, + n.secret.Path, n.config.Namespace, n.secret.Name) + + return &structs.Template{ + EmbeddedTmpl: data, + DestPath: filepath.Clean(filepath.Join(n.secretDir, n.tmplFile)), + ChangeMode: structs.TemplateChangeModeNoop, + Once: true, + } +} + +// validateNomadInputs ensures none of the user provided inputs contain delimiters +// that could be used to inject other CT functions. 
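+// For example, a namespace of "( some namespace )" or a path ending in "}}" (as
+// exercised by the tests below) is rejected before any template is rendered.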
+func validateNomadInputs(conf *nomadProviderConfig, path string) error { + if strings.ContainsAny(conf.Namespace, "(){}") { + return errors.New("namespace cannot contain template delimiters or parenthesis") + } + + if strings.ContainsAny(path, "(){}") { + return errors.New("path cannot contain template delimiters or parenthesis") + } + + return nil +} diff --git a/client/allocrunner/taskrunner/secrets/nomad_provider_test.go b/client/allocrunner/taskrunner/secrets/nomad_provider_test.go new file mode 100644 index 000000000..bbed5ebe9 --- /dev/null +++ b/client/allocrunner/taskrunner/secrets/nomad_provider_test.go @@ -0,0 +1,82 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package secrets + +import ( + "testing" + + "github.com/hashicorp/nomad/nomad/structs" + "github.com/shoenig/test/must" +) + +func TestNomadProvider_BuildTemplate(t *testing.T) { + t.Run("variable template succeeds", func(t *testing.T) { + testDir := t.TempDir() + testSecret := &structs.Secret{ + Name: "foo", + Provider: "nomad", + Path: "/test/path", + Config: map[string]any{ + "namespace": "dev", + }, + } + p, err := NewNomadProvider(testSecret, testDir, "test", "default") + must.NoError(t, err) + + tmpl := p.BuildTemplate() + must.NotNil(t, tmpl) + + // expected template should have correct path and name + expectedTmpl := ` + {{ with nomadVar "/test/path@dev" }} + {{ range $k, $v := . }} + secret.foo.{{ $k }}={{ $v }} + {{ end }} + {{ end }}` + // validate template string contains expected data + must.Eq(t, tmpl.EmbeddedTmpl, expectedTmpl) + }) + + t.Run("invalid config options errors", func(t *testing.T) { + testDir := t.TempDir() + testSecret := &structs.Secret{ + Name: "foo", + Provider: "nomad", + Path: "/test/path", + Config: map[string]any{ + "namespace": 123, + }, + } + _, err := NewNomadProvider(testSecret, testDir, "test", "default") + must.Error(t, err) + }) + + t.Run("path with delimiter errors", func(t *testing.T) { + testDir := t.TempDir() + testSecret := &structs.Secret{ + Name: "foo", + Provider: "nomad", + Path: "/test/path}}", + Config: map[string]any{ + "namespace": 123, + }, + } + _, err := NewNomadProvider(testSecret, testDir, "test", "default") + must.Error(t, err) + }) + + t.Run("namespace with parenthesis errors", func(t *testing.T) { + testDir := t.TempDir() + testSecret := &structs.Secret{ + Name: "foo", + Provider: "nomad", + Path: "/test/path", + Config: map[string]any{ + "namespace": "( some namespace )", + }, + } + _, err := NewNomadProvider(testSecret, testDir, "test", "default") + must.Error(t, err) + }) +} diff --git a/client/allocrunner/taskrunner/secrets/plugin_provider.go b/client/allocrunner/taskrunner/secrets/plugin_provider.go new file mode 100644 index 000000000..b0c3bdc96 --- /dev/null +++ b/client/allocrunner/taskrunner/secrets/plugin_provider.go @@ -0,0 +1,52 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package secrets + +import ( + "context" + "fmt" + + "github.com/hashicorp/nomad/client/commonplugins" +) + +type ExternalPluginProvider struct { + // plugin is the commonplugin to be executed by this secret + plugin commonplugins.SecretsPlugin + + // name of the plugin and also the executable + name string + + // path is the secret location used in Fetch + path string +} + +type Response struct { + Result map[string]string `json:"result"` + Error *string `json:"error,omitempty"` +} + +func NewExternalPluginProvider(plugin commonplugins.SecretsPlugin, name string, path string) *ExternalPluginProvider { + return &ExternalPluginProvider{ + plugin: plugin, + name: name, + path: path, + } +} + +func (p *ExternalPluginProvider) Fetch(ctx context.Context) (map[string]string, error) { + resp, err := p.plugin.Fetch(ctx, p.path) + if err != nil { + return nil, fmt.Errorf("failed to fetch secret from plugin %s: %w", p.name, err) + } + if resp.Error != nil { + return nil, fmt.Errorf("error returned from secret plugin %s: %s", p.name, *resp.Error) + } + + formatted := make(map[string]string, len(resp.Result)) + for k, v := range resp.Result { + formatted[fmt.Sprintf("secret.%s.%s", p.name, k)] = v + } + + return formatted, nil +} diff --git a/client/allocrunner/taskrunner/secrets/plugin_provider_test.go b/client/allocrunner/taskrunner/secrets/plugin_provider_test.go new file mode 100644 index 000000000..8d6ce13ae --- /dev/null +++ b/client/allocrunner/taskrunner/secrets/plugin_provider_test.go @@ -0,0 +1,86 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package secrets + +import ( + "context" + "errors" + "testing" + + "github.com/hashicorp/nomad/client/commonplugins" + "github.com/shoenig/test/must" + "github.com/stretchr/testify/mock" +) + +type MockSecretPlugin struct { + mock.Mock +} + +func (m *MockSecretPlugin) Fingerprint(ctx context.Context) (*commonplugins.PluginFingerprint, error) { + return nil, nil +} + +func (m *MockSecretPlugin) Fetch(ctx context.Context, path string) (*commonplugins.SecretResponse, error) { + args := m.Called() + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(*commonplugins.SecretResponse), args.Error(1) +} + +func (m *MockSecretPlugin) Parse() (map[string]string, error) { + return nil, nil +} + +// SecretsPlugin is tested in commonplugins package. We can use a mock here to test how +// the ExternalPluginProvider handles various error scenarios when calling Fetch. 
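+// Fingerprint and Parse are stubbed to return nil because the provider only ever
+// calls Fetch; the mock controls the (*SecretResponse, error) pair it returns.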
+func TestExternalPluginProvider_Fetch(t *testing.T) { + t.Run("errors if fetch errors", func(t *testing.T) { + mockSecretPlugin := new(MockSecretPlugin) + mockSecretPlugin.On("Fetch", mock.Anything).Return(nil, errors.New("something bad")) + + testProvider := NewExternalPluginProvider(mockSecretPlugin, "test", "test") + + vars, err := testProvider.Fetch(t.Context()) + must.ErrorContains(t, err, "something bad") + must.Nil(t, vars) + }) + + t.Run("errors if fetch response contains error", func(t *testing.T) { + mockSecretPlugin := new(MockSecretPlugin) + testError := "something bad" + mockSecretPlugin.On("Fetch", mock.Anything).Return(&commonplugins.SecretResponse{ + Result: nil, + Error: &testError, + }, nil) + + testProvider := NewExternalPluginProvider(mockSecretPlugin, "test", "test") + + vars, err := testProvider.Fetch(t.Context()) + must.ErrorContains(t, err, "error returned from secret plugin") + must.Nil(t, vars) + }) + + t.Run("formats response correctly", func(t *testing.T) { + mockSecretPlugin := new(MockSecretPlugin) + mockSecretPlugin.On("Fetch", mock.Anything).Return(&commonplugins.SecretResponse{ + Result: map[string]string{ + "testkey": "testvalue", + }, + Error: nil, + }, nil) + + testProvider := NewExternalPluginProvider(mockSecretPlugin, "test", "test") + + result, err := testProvider.Fetch(t.Context()) + must.NoError(t, err) + + exp := map[string]string{ + "secret.test.testkey": "testvalue", + } + must.Eq(t, exp, result) + }) +} diff --git a/client/allocrunner/taskrunner/secrets/vault_provider.go b/client/allocrunner/taskrunner/secrets/vault_provider.go new file mode 100644 index 000000000..3ad29760e --- /dev/null +++ b/client/allocrunner/taskrunner/secrets/vault_provider.go @@ -0,0 +1,80 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package secrets + +import ( + "errors" + "fmt" + "path/filepath" + "strings" + + "github.com/hashicorp/nomad/nomad/structs" + "github.com/mitchellh/mapstructure" +) + +const ( + SecretProviderVault = "vault" + + VAULT_KV = "kv" + VAULT_KV_V2 = "kv_v2" +) + +type vaultProviderConfig struct { + Engine string `mapstructure:"engine"` +} + +func defaultVaultConfig() *vaultProviderConfig { + return &vaultProviderConfig{ + Engine: VAULT_KV, + } +} + +type VaultProvider struct { + secret *structs.Secret + secretDir string + tmplFile string + conf *vaultProviderConfig +} + +// NewVaultProvider takes a task secret and decodes the config, overwriting the default config fields +// with any provided fields, returning an error if the secret or secret's config is invalid. 
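+// The only provider-specific option decoded here is "engine", which defaults to "kv"
+// and may be set to "kv_v2"; it selects whether BuildTemplate indexes .Data or .Data.data.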
+func NewVaultProvider(secret *structs.Secret, secretDir string, tmplFile string) (*VaultProvider, error) { + conf := defaultVaultConfig() + if err := mapstructure.Decode(secret.Config, conf); err != nil { + return nil, err + } + + if strings.ContainsAny(secret.Path, "(){}") { + return nil, errors.New("secret path cannot contain template delimiters or parenthesis") + } + + return &VaultProvider{ + secret: secret, + secretDir: secretDir, + tmplFile: tmplFile, + conf: conf, + }, nil +} + +func (v *VaultProvider) BuildTemplate() *structs.Template { + indexKey := ".Data" + if v.conf.Engine == VAULT_KV_V2 { + indexKey = ".Data.data" + } + + data := fmt.Sprintf(` + {{ with secret "%s" }} + {{ range $k, $v := %s }} + secret.%s.{{ $k }}={{ $v }} + {{ end }} + {{ end }}`, + v.secret.Path, indexKey, v.secret.Name) + + return &structs.Template{ + EmbeddedTmpl: data, + DestPath: filepath.Clean(filepath.Join(v.secretDir, v.tmplFile)), + ChangeMode: structs.TemplateChangeModeNoop, + Once: true, + } +} diff --git a/client/allocrunner/taskrunner/secrets/vault_provider_test.go b/client/allocrunner/taskrunner/secrets/vault_provider_test.go new file mode 100644 index 000000000..77b1eef0b --- /dev/null +++ b/client/allocrunner/taskrunner/secrets/vault_provider_test.go @@ -0,0 +1,89 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package secrets + +import ( + "testing" + + "github.com/hashicorp/nomad/nomad/structs" + "github.com/shoenig/test/must" +) + +func TestVaultProvider_BuildTemplate(t *testing.T) { + t.Run("kv template succeeds", func(t *testing.T) { + testDir := t.TempDir() + testSecret := &structs.Secret{ + Name: "foo", + Provider: "vault", + Path: "/test/path", + } + p, err := NewVaultProvider(testSecret, testDir, "test") + must.NoError(t, err) + + tmpl := p.BuildTemplate() + must.NotNil(t, tmpl) + + // expected template should have correct path, index, and name + expectedTmpl := ` + {{ with secret "/test/path" }} + {{ range $k, $v := .Data }} + secret.foo.{{ $k }}={{ $v }} + {{ end }} + {{ end }}` + // validate template string contains expected data + must.Eq(t, tmpl.EmbeddedTmpl, expectedTmpl) + }) + + t.Run("kv_v2 template succeeds", func(t *testing.T) { + testDir := t.TempDir() + testSecret := &structs.Secret{ + Name: "foo", + Provider: "vault", + Path: "/test/path", + Config: map[string]any{ + "engine": VAULT_KV_V2, + }, + } + p, err := NewVaultProvider(testSecret, testDir, "test") + must.NoError(t, err) + + tmpl := p.BuildTemplate() + must.NotNil(t, tmpl) + + // expected template should have correct path, index, and name + expectedTmpl := ` + {{ with secret "/test/path" }} + {{ range $k, $v := .Data.data }} + secret.foo.{{ $k }}={{ $v }} + {{ end }} + {{ end }}` + // validate template string contains expected data + must.Eq(t, tmpl.EmbeddedTmpl, expectedTmpl) + }) + + t.Run("invalid config options errors", func(t *testing.T) { + testDir := t.TempDir() + testSecret := &structs.Secret{ + Name: "foo", + Provider: "vault", + Path: "/test/path", + Config: map[string]any{ + "engine": 123, + }, + } + _, err := NewVaultProvider(testSecret, testDir, "test") + must.Error(t, err) + }) + + t.Run("path containing delimeter errors", func(t *testing.T) { + testDir := t.TempDir() + testSecret := &structs.Secret{ + Name: "foo", + Provider: "vault", + Path: "/test/path}}", + } + _, err := NewVaultProvider(testSecret, testDir, "test") + must.Error(t, err) + }) +} diff --git a/client/allocrunner/taskrunner/secrets_hook.go b/client/allocrunner/taskrunner/secrets_hook.go new file mode 
100644 index 000000000..73de9e457 --- /dev/null +++ b/client/allocrunner/taskrunner/secrets_hook.go @@ -0,0 +1,211 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package taskrunner + +import ( + "bytes" + "context" + "fmt" + "sync" + + "github.com/hashicorp/consul-template/renderer" + "github.com/hashicorp/go-envparse" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/nomad/client/allocrunner/interfaces" + ti "github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces" + "github.com/hashicorp/nomad/client/allocrunner/taskrunner/secrets" + "github.com/hashicorp/nomad/client/allocrunner/taskrunner/template" + "github.com/hashicorp/nomad/client/commonplugins" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/client/taskenv" + "github.com/hashicorp/nomad/nomad/structs" +) + +type TemplateProvider interface { + BuildTemplate() *structs.Template +} + +type PluginProvider interface { + Fetch(context.Context) (map[string]string, error) +} + +type secretsHookConfig struct { + // logger is used to log + logger log.Logger + + // lifecycle is used to interact with the task's lifecycle + lifecycle ti.TaskLifecycle + + // events is used to emit events + events ti.EventEmitter + + // clientConfig is the Nomad Client configuration + clientConfig *config.Config + + // envBuilder is the environment variable builder for the task. + envBuilder *taskenv.Builder + + // nomadNamespace is the job's Nomad namespace + nomadNamespace string +} + +type secretsHook struct { + // logger is used to log + logger log.Logger + + // lifecycle is used to interact with the task's lifecycle + lifecycle ti.TaskLifecycle + + // events is used to emit events + events ti.EventEmitter + + // clientConfig is the Nomad Client configuration + clientConfig *config.Config + + // envBuilder is the environment variable builder for the task + envBuilder *taskenv.Builder + + // nomadNamespace is the job's Nomad namespace + nomadNamespace string + + // secrets to be fetched and populated for interpolation + secrets []*structs.Secret +} + +func newSecretsHook(conf *secretsHookConfig, secrets []*structs.Secret) *secretsHook { + return &secretsHook{ + logger: conf.logger, + lifecycle: conf.lifecycle, + events: conf.events, + clientConfig: conf.clientConfig, + envBuilder: conf.envBuilder, + nomadNamespace: conf.nomadNamespace, + secrets: secrets, + } +} + +func (h *secretsHook) Name() string { + return "secrets" +} + +func (h *secretsHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error { + tmplProvider, pluginProvider, err := h.buildSecretProviders(req.TaskDir.SecretsDir) + if err != nil { + return err + } + + templates := []*structs.Template{} + for _, p := range tmplProvider { + templates = append(templates, p.BuildTemplate()) + } + + vaultCluster := req.Task.GetVaultClusterName() + vaultConfig := h.clientConfig.GetVaultConfigs(h.logger)[vaultCluster] + + mu := &sync.Mutex{} + contents := []byte{} + unblock := make(chan struct{}) + tm, err := template.NewTaskTemplateManager(&template.TaskTemplateManagerConfig{ + UnblockCh: unblock, + Lifecycle: h.lifecycle, + Events: h.events, + Templates: templates, + ClientConfig: h.clientConfig, + VaultToken: req.VaultToken, + VaultConfig: vaultConfig, + VaultNamespace: req.Alloc.Job.VaultNamespace, + TaskDir: req.TaskDir.Dir, + EnvBuilder: h.envBuilder, + MaxTemplateEventRate: template.DefaultMaxTemplateEventRate, + 
NomadNamespace: h.nomadNamespace, + NomadToken: req.NomadToken, + TaskID: req.Alloc.ID + "-" + req.Task.Name, + Logger: h.logger, + + // This RenderFunc is used to keep any secret data from being written to disk. + RenderFunc: func(ri *renderer.RenderInput) (*renderer.RenderResult, error) { + // This RenderFunc is called by a single goroutine synchronously, but we + // lock the append in the event this behavior changes without us knowing. + mu.Lock() + defer mu.Unlock() + contents = append(contents, ri.Contents...) + return &renderer.RenderResult{ + DidRender: true, + WouldRender: true, + Contents: ri.Contents, + }, nil + }, + }) + if err != nil { + return err + } + + go tm.Run() + + // Safeguard against the template manager continuing to run. + defer tm.Stop() + + select { + case <-ctx.Done(): + return nil + case <-unblock: + } + + // Set secrets from templates + m, err := envparse.Parse(bytes.NewBuffer(contents)) + if err != nil { + return err + } + h.envBuilder.SetSecrets(m) + + // Set secrets from plugin providers + for _, p := range pluginProvider { + vars, err := p.Fetch(ctx) + if err != nil { + return err + } + h.envBuilder.SetSecrets(vars) + } + + resp.Done = true + return nil +} + +func (h *secretsHook) buildSecretProviders(secretDir string) ([]TemplateProvider, []PluginProvider, error) { + // Any configuration errors will be found when calling the secret providers constructor, + // so use a multierror to collect all errors and return them to the user at the same time. + tmplProvider, pluginProvider, mErr := []TemplateProvider{}, []PluginProvider{}, new(multierror.Error) + + for idx, s := range h.secrets { + if s == nil { + continue + } + + tmplFile := fmt.Sprintf("temp-%d", idx) + switch s.Provider { + case secrets.SecretProviderNomad: + if p, err := secrets.NewNomadProvider(s, secretDir, tmplFile, h.nomadNamespace); err != nil { + multierror.Append(mErr, err) + } else { + tmplProvider = append(tmplProvider, p) + } + case secrets.SecretProviderVault: + if p, err := secrets.NewVaultProvider(s, secretDir, tmplFile); err != nil { + multierror.Append(mErr, err) + } else { + tmplProvider = append(tmplProvider, p) + } + default: + plug, err := commonplugins.NewExternalSecretsPlugin(h.clientConfig.CommonPluginDir, s.Provider, s.Env) + if err != nil { + multierror.Append(mErr, err) + continue + } + pluginProvider = append(pluginProvider, secrets.NewExternalPluginProvider(plug, s.Name, s.Path)) + } + } + + return tmplProvider, pluginProvider, mErr.ErrorOrNil() +} diff --git a/client/allocrunner/taskrunner/secrets_hook_test.go b/client/allocrunner/taskrunner/secrets_hook_test.go new file mode 100644 index 000000000..076c8e5f7 --- /dev/null +++ b/client/allocrunner/taskrunner/secrets_hook_test.go @@ -0,0 +1,319 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package taskrunner + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "strconv" + "testing" + "time" + + "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/client/allocdir" + "github.com/hashicorp/nomad/client/allocrunner/interfaces" + trtesting "github.com/hashicorp/nomad/client/allocrunner/taskrunner/testing" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/client/taskenv" + "github.com/hashicorp/nomad/helper/bufconndialer" + "github.com/hashicorp/nomad/helper/pointer" + "github.com/hashicorp/nomad/helper/testlog" + "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" + structsc "github.com/hashicorp/nomad/nomad/structs/config" + "github.com/shoenig/test/must" +) + +func TestSecretsHook_Prestart_Nomad(t *testing.T) { + ci.Parallel(t) + + t.Run("nomad provider successfully renders valid secrets", func(t *testing.T) { + secretsResp := ` + { + "CreateIndex": 812, + "CreateTime": 1750782609539170600, + "Items": { + "key2": "value2", + "key1": "value1" + }, + "ModifyIndex": 812, + "ModifyTime": 1750782609539170600, + "Namespace": "default", + "Path": "testnomadvar" + } + ` + count := 0 // CT expects a nomad index header that increments, or else it continues polling + nomadServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("X-Nomad-Index", strconv.Itoa(count)) + fmt.Fprintln(w, secretsResp) + count += 1 + })) + t.Cleanup(nomadServer.Close) + + l, d := bufconndialer.New() + nomadServer.Listener = l + + nomadServer.Start() + + clientConfig := config.DefaultConfig() + clientConfig.TemplateDialer = d + clientConfig.TemplateConfig.DisableSandbox = true + + taskDir := t.TempDir() + alloc := mock.MinAlloc() + task := alloc.Job.TaskGroups[0].Tasks[0] + + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, clientConfig.Region) + conf := &secretsHookConfig{ + logger: testlog.HCLogger(t), + lifecycle: trtesting.NewMockTaskHooks(), + events: &trtesting.MockEmitter{}, + clientConfig: clientConfig, + envBuilder: taskEnv, + } + secretHook := newSecretsHook(conf, []*structs.Secret{ + { + Name: "test_secret", + Provider: "nomad", + Path: "testnomadvar", + Config: map[string]any{ + "namespace": "default", + }, + }, + { + Name: "test_secret1", + Provider: "nomad", + Path: "testnomadvar1", + Config: map[string]any{ + "namespace": "default", + }, + }, + }) + + req := &interfaces.TaskPrestartRequest{ + Alloc: alloc, + Task: task, + TaskDir: &allocdir.TaskDir{Dir: taskDir, SecretsDir: taskDir}, + } + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + t.Cleanup(cancel) + + err := secretHook.Prestart(ctx, req, &interfaces.TaskPrestartResponse{}) + must.NoError(t, err) + + expected := map[string]string{ + "secret.test_secret.key1": "value1", + "secret.test_secret.key2": "value2", + "secret.test_secret1.key1": "value1", + "secret.test_secret1.key2": "value2", + } + must.Eq(t, expected, taskEnv.Build().TaskSecrets) + }) + + t.Run("returns early if context is cancelled", func(t *testing.T) { + + secretsResp := ` + { + "CreateIndex": 812, + "CreateTime": 1750782609539170600, + "Items": { + "key2": "value2", + "key1": "value1" + }, + "ModifyIndex": 812, + "ModifyTime": 1750782609539170600, + "Namespace": "default", + "Path": "testnomadvar" + } + ` + count := 0 // CT expects a nomad index header that increments, or else it continues polling + nomadServer := 
httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("X-Nomad-Index", strconv.Itoa(count)) + fmt.Fprintln(w, secretsResp) + count += 1 + })) + t.Cleanup(nomadServer.Close) + + l, d := bufconndialer.New() + nomadServer.Listener = l + + nomadServer.Start() + + clientConfig := config.DefaultConfig() + clientConfig.TemplateDialer = d + clientConfig.TemplateConfig.DisableSandbox = true + + taskDir := t.TempDir() + alloc := mock.MinAlloc() + task := alloc.Job.TaskGroups[0].Tasks[0] + + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, clientConfig.Region) + conf := &secretsHookConfig{ + logger: testlog.HCLogger(t), + lifecycle: trtesting.NewMockTaskHooks(), + events: &trtesting.MockEmitter{}, + clientConfig: clientConfig, + envBuilder: taskEnv, + } + secretHook := newSecretsHook(conf, []*structs.Secret{ + { + Name: "test_secret", + Provider: "nomad", + Path: "testnomadvar", + Config: map[string]any{ + "namespace": "default", + }, + }, + }) + + req := &interfaces.TaskPrestartRequest{ + Alloc: alloc, + Task: task, + TaskDir: &allocdir.TaskDir{Dir: taskDir, SecretsDir: taskDir}, + } + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + cancel() // cancel context to simulate task being stopped + + err := secretHook.Prestart(ctx, req, &interfaces.TaskPrestartResponse{}) + must.NoError(t, err) + + expected := map[string]string{} + must.Eq(t, expected, taskEnv.Build().TaskSecrets) + }) + + t.Run("errors when failure building secret providers", func(t *testing.T) { + clientConfig := config.DefaultConfig() + + taskDir := t.TempDir() + alloc := mock.MinAlloc() + task := alloc.Job.TaskGroups[0].Tasks[0] + + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, clientConfig.Region) + conf := &secretsHookConfig{ + logger: testlog.HCLogger(t), + lifecycle: trtesting.NewMockTaskHooks(), + events: &trtesting.MockEmitter{}, + clientConfig: clientConfig, + envBuilder: taskEnv, + } + + // give an invalid secret, in this case a nomad secret with bad namespace + secretHook := newSecretsHook(conf, []*structs.Secret{ + { + Name: "test_secret", + Provider: "nomad", + Path: "testnomadvar", + Config: map[string]any{ + "namespace": 123, + }, + }, + }) + + req := &interfaces.TaskPrestartRequest{ + Alloc: alloc, + Task: task, + TaskDir: &allocdir.TaskDir{Dir: taskDir, SecretsDir: taskDir}, + } + + // Prestart should error and return after building secrets + err := secretHook.Prestart(context.Background(), req, nil) + must.Error(t, err) + + expected := map[string]string{} + must.Eq(t, expected, taskEnv.Build().TaskSecrets) + }) +} + +func TestSecretsHook_Prestart_Vault(t *testing.T) { + ci.Parallel(t) + + secretsResp := ` +{ + "Data": { + "data": { + "secret": "secret" + }, + "metadata": { + "created_time": "2023-10-18T15:58:29.65137Z", + "custom_metadata": null, + "deletion_time": "", + "destroyed": false, + "version": 1 + } + } +}` + + // Start test server to simulate Vault cluster responses. + // reqCh := make(chan any) + defaultVaultServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, secretsResp) + })) + t.Cleanup(defaultVaultServer.Close) + + // Setup client with Vault config. 
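+ // Every request to the test server above returns the same kv_v2 payload, so both
+ // secret blocks below resolve to the same "secret" key.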
+ clientConfig := config.DefaultConfig() + clientConfig.TemplateConfig.DisableSandbox = true + clientConfig.VaultConfigs = map[string]*structsc.VaultConfig{ + structs.VaultDefaultCluster: { + Name: structs.VaultDefaultCluster, + Enabled: pointer.Of(true), + Addr: defaultVaultServer.URL, + }, + } + + taskDir := t.TempDir() + alloc := mock.MinAlloc() + task := alloc.Job.TaskGroups[0].Tasks[0] + + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, clientConfig.Region) + conf := &secretsHookConfig{ + logger: testlog.HCLogger(t), + lifecycle: trtesting.NewMockTaskHooks(), + events: &trtesting.MockEmitter{}, + clientConfig: clientConfig, + envBuilder: taskEnv, + } + secretHook := newSecretsHook(conf, []*structs.Secret{ + { + Name: "test_secret", + Provider: "vault", + Path: "/test/path", + Config: map[string]any{ + "engine": "kv_v2", + }, + }, + { + Name: "test_secret1", + Provider: "vault", + Path: "/test/path1", + Config: map[string]any{ + "engine": "kv_v2", + }, + }, + }) + + // Start template hook with a timeout context to ensure it exists. + req := &interfaces.TaskPrestartRequest{ + Alloc: alloc, + Task: task, + TaskDir: &allocdir.TaskDir{Dir: taskDir, SecretsDir: taskDir}, + } + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + t.Cleanup(cancel) + + err := secretHook.Prestart(ctx, req, &interfaces.TaskPrestartResponse{}) + must.NoError(t, err) + + exp := map[string]string{ + "secret.test_secret.secret": "secret", + "secret.test_secret1.secret": "secret", + } + + must.Eq(t, exp, taskEnv.Build().TaskSecrets) +} diff --git a/client/allocrunner/taskrunner/task_runner_hooks.go b/client/allocrunner/taskrunner/task_runner_hooks.go index 04faa353e..bbd57f661 100644 --- a/client/allocrunner/taskrunner/task_runner_hooks.go +++ b/client/allocrunner/taskrunner/task_runner_hooks.go @@ -55,40 +55,16 @@ func (tr *TaskRunner) initHooks() { tr.logmonHookConfig = newLogMonHookConfig(task.Name, task.LogConfig, tr.taskDir.LogDir) - // Add the hook resources - tr.hookResources = &hookResources{} - - // Create the task directory hook. This is run first to ensure the + // Create the task directory hook first. This is run first to ensure the // directory path exists for other hooks. - alloc := tr.Alloc() + tr.hookResources = &hookResources{} tr.runnerHooks = []interfaces.TaskHook{ newValidateHook(tr.clientConfig, hookLogger), newDynamicUsersHook(tr.killCtx, tr.driverCapabilities.DynamicWorkloadUsers, tr.logger, tr.users), newTaskDirHook(tr, hookLogger), newIdentityHook(tr, hookLogger), - newLogMonHook(tr, hookLogger), - newDispatchHook(alloc, hookLogger), - newVolumeHook(tr, hookLogger), - newArtifactHook(tr, tr.getter, hookLogger), - newStatsHook(tr, tr.clientConfig.StatsCollectionInterval, tr.clientConfig.PublishAllocationMetrics, hookLogger), - newDeviceHook(tr.devicemanager, hookLogger), - newAPIHook(tr.shutdownCtx, tr.clientConfig.APIListenerRegistrar, hookLogger), - newWranglerHook(tr.wranglers, task.Name, alloc.ID, task.UsesCores(), hookLogger), + newConsulHook(hookLogger, tr), } - - // If the task has a CSI block, add the hook. 
- if task.CSIPluginConfig != nil { - tr.runnerHooks = append(tr.runnerHooks, newCSIPluginSupervisorHook( - &csiPluginSupervisorHookConfig{ - clientStateDirPath: tr.clientConfig.StateDir, - events: tr, - runner: tr, - lifecycle: tr, - capabilities: tr.driverCapabilities, - logger: hookLogger, - })) - } - // If Vault is enabled, add the hook if task.Vault != nil && tr.vaultClientFunc != nil { tr.runnerHooks = append(tr.runnerHooks, newVaultHook(&vaultHookConfig{ @@ -105,13 +81,45 @@ func (tr *TaskRunner) initHooks() { })) } + if len(task.Secrets) > 0 { + tr.runnerHooks = append(tr.runnerHooks, newSecretsHook(&secretsHookConfig{ + logger: tr.logger, + lifecycle: tr, + events: tr, + clientConfig: tr.clientConfig, + envBuilder: tr.envBuilder, + nomadNamespace: tr.alloc.Job.Namespace, + }, task.Secrets)) + } + + alloc := tr.Alloc() + tr.runnerHooks = append(tr.runnerHooks, []interfaces.TaskHook{ + newLogMonHook(tr, hookLogger), + newDispatchHook(alloc, hookLogger), + newVolumeHook(tr, hookLogger), + newArtifactHook(tr, tr.getter, hookLogger), + newStatsHook(tr, tr.clientConfig.StatsCollectionInterval, tr.clientConfig.PublishAllocationMetrics, hookLogger), + newDeviceHook(tr.devicemanager, hookLogger), + newAPIHook(tr.shutdownCtx, tr.clientConfig.APIListenerRegistrar, hookLogger), + newWranglerHook(tr.wranglers, task.Name, alloc.ID, task.UsesCores(), hookLogger), + }...) + + // If the task has a CSI block, add the hook. + if task.CSIPluginConfig != nil { + tr.runnerHooks = append(tr.runnerHooks, newCSIPluginSupervisorHook( + &csiPluginSupervisorHookConfig{ + clientStateDirPath: tr.clientConfig.StateDir, + events: tr, + runner: tr, + lifecycle: tr, + capabilities: tr.driverCapabilities, + logger: hookLogger, + })) + } + // Get the consul namespace for the TG of the allocation. consulNamespace := tr.alloc.ConsulNamespaceForTask(tr.taskName) - // Add the consul hook (populates task secret dirs and sets the environment if - // consul tokens are present for the task). - tr.runnerHooks = append(tr.runnerHooks, newConsulHook(hookLogger, tr)) - // If there are templates is enabled, add the hook if len(task.Templates) != 0 { tr.runnerHooks = append(tr.runnerHooks, newTemplateHook(&templateHookConfig{ diff --git a/client/allocrunner/taskrunner/template/template.go b/client/allocrunner/taskrunner/template/template.go index 3c3a496ab..b36626cff 100644 --- a/client/allocrunner/taskrunner/template/template.go +++ b/client/allocrunner/taskrunner/template/template.go @@ -18,6 +18,7 @@ import ( ctconf "github.com/hashicorp/consul-template/config" "github.com/hashicorp/consul-template/manager" + "github.com/hashicorp/consul-template/renderer" "github.com/hashicorp/consul-template/signals" envparse "github.com/hashicorp/go-envparse" "github.com/hashicorp/go-hclog" @@ -132,6 +133,11 @@ type TaskTemplateManagerConfig struct { TaskID string Logger hclog.Logger + + // RenderFunc allows custom rendering of templated data, and overrides the + // Nomad custom RenderFunc used for sandboxing. This is currently used by + // the secrets block to hold all templated data in memory. + RenderFunc renderer.Renderer } // Validate validates the configuration. 
@@ -980,7 +986,11 @@ func newRunnerConfig(config *TaskTemplateManagerConfig, sandboxEnabled := isSandboxEnabled(config) sandboxDir := filepath.Dir(config.TaskDir) // alloc working directory conf.ReaderFunc = ReaderFn(config.TaskID, sandboxDir, sandboxEnabled) - conf.RendererFunc = RenderFn(config.TaskID, sandboxDir, sandboxEnabled) + if config.RenderFunc != nil { + conf.RendererFunc = config.RenderFunc + } else { + conf.RendererFunc = RenderFn(config.TaskID, sandboxDir, sandboxEnabled) + } conf.Finalize() return conf, nil } diff --git a/client/allocrunner/taskrunner/template/template_test.go b/client/allocrunner/taskrunner/template/template_test.go index 05188811d..55ace68c6 100644 --- a/client/allocrunner/taskrunner/template/template_test.go +++ b/client/allocrunner/taskrunner/template/template_test.go @@ -1699,6 +1699,7 @@ func TestTaskTemplateManager_Env_InterpolatedDest(t *testing.T) { map[string]string{"NOMAD_META_path": "exists"}, map[string]string{}, map[string]string{}, + map[string]string{}, d, "") vars, err := loadTemplateEnv(templates, taskEnv) diff --git a/client/commonplugins/commonplugins.go b/client/commonplugins/commonplugins.go new file mode 100644 index 000000000..58aa69349 --- /dev/null +++ b/client/commonplugins/commonplugins.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package commonplugins + +import ( + "bytes" + "context" + "errors" + "os/exec" + "syscall" + "time" + + "github.com/hashicorp/go-version" +) + +var ( + ErrPluginNotExists error = errors.New("plugin not found") + ErrPluginNotExecutable error = errors.New("plugin not executable") +) + +type CommonPlugin interface { + Fingerprint(ctx context.Context) (*PluginFingerprint, error) +} + +// CommonPlugins are expected to respond to 'fingerprint' calls with json that +// unmarshals to this struct. +type PluginFingerprint struct { + Version *version.Version `json:"version"` + Type *string `json:"type"` +} + +// runPlugin is a helper for executing the provided Cmd and capturing stdout/stderr. +// This helper implements both the soft and hard timeouts defined by the common +// plugins interface. +func runPlugin(cmd *exec.Cmd, killTimeout time.Duration) (stdout, stderr []byte, err error) { + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + done := make(chan error, 1) + cmd.Cancel = func() error { + var cancelErr error + + _ = cmd.Process.Signal(syscall.SIGTERM) + killTimer := time.NewTimer(killTimeout) + defer killTimer.Stop() + + select { + case <-killTimer.C: + cancelErr = cmd.Process.Kill() + case <-done: + } + + return cancelErr + } + + // start the command + stdout, err = cmd.Output() + done <- err + + stderr = errBuf.Bytes() + return +} diff --git a/client/commonplugins/secrets_plugin.go b/client/commonplugins/secrets_plugin.go new file mode 100644 index 000000000..110f23d6f --- /dev/null +++ b/client/commonplugins/secrets_plugin.go @@ -0,0 +1,126 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package commonplugins + +import ( + "context" + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/helper" +) + +const ( + SecretsPluginDir = "secrets" + + // The timeout for the plugin command before it is send SIGTERM + SecretsCmdTimeout = 10 * time.Second + + // The timeout before the command is sent SIGKILL after being SIGTERM'd + SecretsKillTimeout = 2 * time.Second +) + +type SecretsPlugin interface { + CommonPlugin + Fetch(ctx context.Context, path string) (*SecretResponse, error) +} + +type SecretResponse struct { + Result map[string]string `json:"result"` + Error *string `json:"error"` +} + +type externalSecretsPlugin struct { + logger log.Logger + + // pluginPath is the path on the host to the plugin executable + pluginPath string + + // env is optional envVars passed to the plugin process + env map[string]string +} + +// NewExternalSecretsPlugin creates an instance of a secrets plugin by validating the plugin +// binary exists and is executable, and parsing any string key/value pairs out of the config +// which will be used as environment variables for Fetch. +func NewExternalSecretsPlugin(commonPluginDir string, name string, env map[string]string) (*externalSecretsPlugin, error) { + // validate plugin + executable := filepath.Join(commonPluginDir, SecretsPluginDir, name) + f, err := os.Stat(executable) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("%w: %q", ErrPluginNotExists, name) + } + return nil, err + } + if !helper.IsExecutable(f) { + return nil, fmt.Errorf("%w: %q", ErrPluginNotExecutable, name) + } + + return &externalSecretsPlugin{ + pluginPath: executable, + env: env, + }, nil +} + +func (e *externalSecretsPlugin) Fingerprint(ctx context.Context) (*PluginFingerprint, error) { + plugCtx, cancel := context.WithTimeout(ctx, SecretsCmdTimeout) + defer cancel() + + cmd := exec.CommandContext(plugCtx, e.pluginPath, "fingerprint") + cmd.Env = []string{ + "CPI_OPERATION=fingerprint", + } + + stdout, stderr, err := runPlugin(cmd, SecretsKillTimeout) + if err != nil { + return nil, err + } + + if len(stderr) > 0 { + e.logger.Info("fingerprint command stderr output", "msg", string(stderr)) + } + + res := &PluginFingerprint{} + if err := json.Unmarshal(stdout, &res); err != nil { + return nil, err + } + + return res, nil +} + +func (e *externalSecretsPlugin) Fetch(ctx context.Context, path string) (*SecretResponse, error) { + plugCtx, cancel := context.WithTimeout(ctx, SecretsCmdTimeout) + defer cancel() + + cmd := exec.CommandContext(plugCtx, e.pluginPath, "fetch", path) + cmd.Env = []string{ + "CPI_OPERATION=fetch", + } + + for env, val := range e.env { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", env, val)) + } + + stdout, stderr, err := runPlugin(cmd, SecretsKillTimeout) + if err != nil { + return nil, err + } + + if len(stderr) > 0 { + e.logger.Info("fetch command stderr output", "msg", string(stderr)) + } + + res := &SecretResponse{} + if err := json.Unmarshal(stdout, &res); err != nil { + return nil, err + } + + return res, nil +} diff --git a/client/commonplugins/secrets_plugin_test.go b/client/commonplugins/secrets_plugin_test.go new file mode 100644 index 000000000..ab3f6af3b --- /dev/null +++ b/client/commonplugins/secrets_plugin_test.go @@ -0,0 +1,144 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package commonplugins + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/hashicorp/nomad/ci" + "github.com/shoenig/test/must" +) + +func TestExternalSecretsPlugin_Fingerprint(t *testing.T) { + ci.Parallel(t) + + t.Run("runs successfully", func(t *testing.T) { + pluginDir, pluginName := setupTestPlugin(t, fmt.Appendf([]byte{}, "#!/bin/sh\ncat < 0 { + structsTask.Secrets = []*structs.Secret{} + for _, s := range apiTask.Secrets { + structsTask.Secrets = append(structsTask.Secrets, &structs.Secret{ + Name: s.Name, + Provider: s.Provider, + Path: s.Path, + Config: s.Config, + Env: s.Env, + }) + } + } + if apiTask.Consul != nil { structsTask.Consul = apiConsulToStructs(apiTask.Consul) } diff --git a/e2e/e2e_test.go b/e2e/e2e_test.go index 17242c74e..1f2ecf079 100644 --- a/e2e/e2e_test.go +++ b/e2e/e2e_test.go @@ -44,6 +44,7 @@ import ( _ "github.com/hashicorp/nomad/e2e/podman" _ "github.com/hashicorp/nomad/e2e/rescheduling" _ "github.com/hashicorp/nomad/e2e/scaling" + _ "github.com/hashicorp/nomad/e2e/secret" _ "github.com/hashicorp/nomad/e2e/spread" _ "github.com/hashicorp/nomad/e2e/vaultsecrets" _ "github.com/hashicorp/nomad/e2e/volume_mounts" diff --git a/e2e/secret/doc.go b/e2e/secret/doc.go new file mode 100644 index 000000000..eaa9b3f70 --- /dev/null +++ b/e2e/secret/doc.go @@ -0,0 +1,8 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +// Package secret provides end-to-end tests for Nomad workloads with secret blocks. +// +// In order to run this test suite only, from the e2e directory you can trigger +// go test -v ./secret +package secret diff --git a/e2e/secret/input/custom_secret.hcl b/e2e/secret/input/custom_secret.hcl new file mode 100644 index 000000000..6297eb2b5 --- /dev/null +++ b/e2e/secret/input/custom_secret.hcl @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "secret_value" { + type = string + description = "The value of the randomly generated secret for this test" +} + +job "custom_secret" { + + constraint { + attribute = "${attr.kernel.name}" + value = "linux" + } + + update { + min_healthy_time = "1s" + } + + group "group" { + + task "task" { + + driver = "docker" + + config { + image = "busybox:1" + command = "/bin/sh" + args = ["-c", "sleep 300"] + } + + secret "testsecret" { + provider = "test_secret_plugin" + path = "some/path" + env { + // The custom plugin will output this as part of the result field + TEST_ENV = "${var.secret_value}" + } + } + + env { + TEST_SECRET = "${secret.testsecret.TEST_ENV}" + } + + resources { + cpu = 128 + memory = 64 + } + } + } +} diff --git a/e2e/secret/input/nomad_secret.hcl b/e2e/secret/input/nomad_secret.hcl new file mode 100644 index 000000000..af079aa86 --- /dev/null +++ b/e2e/secret/input/nomad_secret.hcl @@ -0,0 +1,50 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +variable "secret_path" { + type = string + description = "The path of the vault secret" +} + +job "nomad_secret" { + + constraint { + attribute = "${attr.kernel.name}" + value = "linux" + } + + update { + min_healthy_time = "1s" + } + + group "group" { + + task "task" { + + driver = "docker" + + config { + image = "busybox:1" + command = "/bin/sh" + args = ["-c", "sleep 300"] + } + + secret "testsecret" { + provider = "nomad" + path = "${var.secret_path}" + config { + namespace = "default" + } + } + + env { + TEST_SECRET = "${secret.testsecret.key}" + } + + resources { + cpu = 128 + memory = 64 + } + } + } +} diff --git a/e2e/secret/input/vault_secret.hcl b/e2e/secret/input/vault_secret.hcl new file mode 100644 index 000000000..6a9027d68 --- /dev/null +++ b/e2e/secret/input/vault_secret.hcl @@ -0,0 +1,52 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "secret_path" { + type = string + description = "The path of the vault secret" +} + +job "vault_secret" { + + constraint { + attribute = "${attr.kernel.name}" + value = "linux" + } + + update { + min_healthy_time = "1s" + } + + group "group" { + + task "task" { + + driver = "docker" + + config { + image = "busybox:1" + command = "/bin/sh" + args = ["-c", "sleep 300"] + } + + vault {} + + secret "testsecret" { + provider = "vault" + path = "${var.secret_path}" + config { + engine = "kv_v2" + } + } + + env { + TEST_SECRET = "${secret.testsecret.key}" + } + + resources { + cpu = 128 + memory = 64 + } + } + } +} diff --git a/e2e/secret/secret_test.go b/e2e/secret/secret_test.go new file mode 100644 index 000000000..07a7929d5 --- /dev/null +++ b/e2e/secret/secret_test.go @@ -0,0 +1,117 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package secret + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/hashicorp/nomad/api" + e2e "github.com/hashicorp/nomad/e2e/e2eutil" + "github.com/hashicorp/nomad/e2e/v3/jobs3" + "github.com/hashicorp/nomad/helper/uuid" + "github.com/shoenig/test/must" +) + +const ns = "default" + +func TestVaultSecret(t *testing.T) { + // Lookup the cluster ID which is the KV backend path start. + clusterID, found := os.LookupEnv("CLUSTER_UNIQUE_IDENTIFIER") + if !found { + t.Fatal("CLUSTER_UNIQUE_IDENTIFIER env var not set") + } + + // Generate our pathing for Vault and a secret value that we will check as + // part of the test. 
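+	// The path embeds the namespace and job name, which is why the job is submitted
+	// with DisableRandomJobID further down.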
+ secretCLIPath := filepath.Join(ns, "vault_secret", "testsecret") + secretFullPath := filepath.Join(clusterID, "data", secretCLIPath) + secretValue := uuid.Generate() + + // Create the secret at the correct mount point for this E2E cluster and use + // the metadata delete command to permanently delete this when the test exits + e2e.MustCommand(t, "vault kv put -mount=%s %s key=%s", clusterID, secretCLIPath, secretValue) + e2e.CleanupCommand(t, "vault kv metadata delete -mount=%s %s", clusterID, secretCLIPath) + + submission, cleanJob := jobs3.Submit(t, + "./input/vault_secret.hcl", + jobs3.DisableRandomJobID(), // our path won't match the secret path with a random jobID + jobs3.Namespace(ns), + jobs3.Var("secret_path", secretFullPath), + ) + t.Cleanup(cleanJob) + + // Validate the nomad variable was read and parsed into the expected + // environment variable + out := submission.Exec("group", "task", []string{"env"}) + must.StrContains(t, out.Stdout, fmt.Sprintf("TEST_SECRET=%s", secretValue)) +} + +func TestNomadSecret(t *testing.T) { + // Generate our pathing for Vault and a secret value that we will check as + // part of the test. + secretFullPath := filepath.Join("nomad_secret", "testsecret") + secretValue := uuid.Generate() + + nomadClient := e2e.NomadClient(t) + + opts := &api.WriteOptions{Namespace: ns} + _, _, err := nomadClient.Variables().Create(&api.Variable{ + Namespace: ns, + Path: secretFullPath, + Items: map[string]string{"key": secretValue}, + }, opts) + must.NoError(t, err) + + // create an ACL policy and attach it to the job ID this test will run + myNamespacePolicy := api.ACLPolicy{ + Name: "secret-block-policy", + Rules: fmt.Sprintf(`namespace "%s" {variables {path "*" {capabilities = ["read"]}}}`, ns), + Description: "This namespace is for secrets block e2e testing", + JobACL: &api.JobACL{ + Namespace: ns, + JobID: "nomad_secret", + }, + } + _, err = nomadClient.ACLPolicies().Upsert(&myNamespacePolicy, nil) + must.NoError(t, err) + + t.Cleanup(func() { + nomadClient.ACLPolicies().Delete("secret-block-policy", nil) + }) + + submission, cleanJob := jobs3.Submit(t, + "./input/nomad_secret.hcl", + jobs3.DisableRandomJobID(), + jobs3.Namespace(ns), + jobs3.Var("secret_path", secretFullPath), + ) + t.Cleanup(cleanJob) + + // Validate the nomad variable was read and parsed into the expected + // environment variable + out := submission.Exec("group", "task", []string{"env"}) + must.StrContains(t, out.Stdout, fmt.Sprintf("TEST_SECRET=%s", secretValue)) +} + +func TestPluginSecret(t *testing.T) { + // Generate a uuid value for the secret plugins env block which it will output + // as a part of the result field. + secretValue := uuid.Generate() + + submission, cleanJob := jobs3.Submit(t, + "./input/custom_secret.hcl", + jobs3.DisableRandomJobID(), + jobs3.Namespace(ns), + jobs3.Var("secret_value", secretValue), + ) + t.Cleanup(cleanJob) + + // Validate the nomad variable was read and parsed into the expected + // environment variable + out := submission.Exec("group", "task", []string{"env"}) + must.StrContains(t, out.Stdout, fmt.Sprintf("TEST_SECRET=%s", secretValue)) +} diff --git a/e2e/terraform/packer/ubuntu-jammy-amd64/common-plugins/test_secret_plugin.sh b/e2e/terraform/packer/ubuntu-jammy-amd64/common-plugins/test_secret_plugin.sh new file mode 100755 index 000000000..e882cb4e3 --- /dev/null +++ b/e2e/terraform/packer/ubuntu-jammy-amd64/common-plugins/test_secret_plugin.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# Copyright (c) HashiCorp, Inc. 
# SPDX-License-Identifier: BUSL-1.1 + +fingerprint() { + echo {\"type\": \"secrets\", \"version\": \"0.0.1\"} +} + +fetch() { + # return any passed environment variables as output + echo '{"result":{'$(printenv | awk -F= '{printf "\"%s\":\"%s\",", $1, $2}' | sed 's/,$//')'}}' +} + +case "$1" in + fingerprint) + fingerprint + ;; + fetch) + fetch + ;; + *) + exit 1 +esac + diff --git a/e2e/terraform/packer/ubuntu-jammy-amd64/setup.sh b/e2e/terraform/packer/ubuntu-jammy-amd64/setup.sh index ebe72a26f..3bfd86d00 100755 --- a/e2e/terraform/packer/ubuntu-jammy-amd64/setup.sh +++ b/e2e/terraform/packer/ubuntu-jammy-amd64/setup.sh @@ -113,6 +113,10 @@ echo "Installing additional CNI network configs" # copy of nomad's "bridge" for connect+cni test (e2e/connect/) sudo mv /tmp/linux/cni/nomad_bridge_copy.conflist /opt/cni/config/ +echo "Installing CPI test plugins" +mkdir_for_root /opt/nomad/data/common_plugins/secrets +sudo mv /tmp/linux/common-plugins/test_secret_plugin.sh /opt/nomad/data/common_plugins/secrets/test_secret_plugin + # Podman echo "Installing Podman" sudo apt-get -y install podman catatonit diff --git a/jobspec2/hcl_conversions.go b/jobspec2/hcl_conversions.go index 443afe288..989242f55 100644 --- a/jobspec2/hcl_conversions.go +++ b/jobspec2/hcl_conversions.go @@ -270,7 +270,8 @@ func decodeTaskGroup(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.D diags = append(diags, moreDiags...) tgExtra := struct { - Vault *api.Vault `hcl:"vault,block"` + Vault *api.Vault `hcl:"vault,block"` + Secrets []*api.Secret `hcl:"secret,block"` }{} extra, _ := gohcl.ImpliedBodySchema(tgExtra) @@ -286,6 +287,14 @@ func decodeTaskGroup(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.D diags = append(diags, hclDecoder.DecodeBody(b.Body, ctx, v)...) tgExtra.Vault = v } + if b.Type == "secret" { + v := &api.Secret{} + diags = append(diags, hclDecoder.DecodeBody(b.Body, ctx, v)...) + if len(b.Labels) == 1 { + v.Name = b.Labels[0] + } + tgExtra.Secrets = append(tgExtra.Secrets, v) + } } d := newHCLDecoder() @@ -304,6 +313,16 @@ func decodeTaskGroup(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.D } } + if len(tgExtra.Secrets) > 0 { + for _, t := range tg.Tasks { + if len(t.Secrets) == 0 { + t.Secrets = tgExtra.Secrets + } else { + t.Secrets = append(t.Secrets, tgExtra.Secrets...) + } + } + } + if tg.Scaling != nil { if tg.Scaling.Type == "" { tg.Scaling.Type = "horizontal" diff --git a/jobspec2/parse.go b/jobspec2/parse.go index 4b91ba76f..289a118ac 100644 --- a/jobspec2/parse.go +++ b/jobspec2/parse.go @@ -133,6 +133,7 @@ func decode(c *jobConfig) error { diags = append(diags, decodeMapInterfaceType(&c.Job, c.EvalContext())...) diags = append(diags, decodeMapInterfaceType(&c.Tasks, c.EvalContext())...) diags = append(diags, decodeMapInterfaceType(&c.Vault, c.EvalContext())...) + diags = append(diags, decodeMapInterfaceType(&c.Secrets, c.EvalContext())...) if diags.HasErrors() { return diags diff --git a/jobspec2/parse_job.go b/jobspec2/parse_job.go index 4ea2edda0..a2c80e84c 100644 --- a/jobspec2/parse_job.go +++ b/jobspec2/parse_job.go @@ -54,6 +54,12 @@ func normalizeJob(jc *jobConfig) { t.Vault = jc.Vault } + if len(t.Secrets) == 0 { + t.Secrets = jc.Secrets + } else { + t.Secrets = append(t.Secrets, jc.Secrets...) + } + //COMPAT To preserve compatibility with pre-1.7 agents, move the default // identity to Task.Identity.
defaultIdx := -1 diff --git a/jobspec2/types.config.go b/jobspec2/types.config.go index f183be970..13ccaee78 100644 --- a/jobspec2/types.config.go +++ b/jobspec2/types.config.go @@ -20,6 +20,7 @@ const ( localsLabel = "locals" vaultLabel = "vault" taskLabel = "task" + secretLabel = "secret" inputVariablesAccessor = "var" localsAccessor = "local" @@ -31,8 +32,9 @@ type jobConfig struct { ParseConfig *ParseConfig - Vault *api.Vault `hcl:"vault,block"` - Tasks []*api.Task `hcl:"task,block"` + Vault *api.Vault `hcl:"vault,block"` + Secrets []*api.Secret `hcl:"secret,block"` + Tasks []*api.Task `hcl:"task,block"` InputVariables Variables LocalVariables Variables @@ -174,6 +176,13 @@ func (c *jobConfig) decodeTopLevelExtras(content *hcl.BodyContent, ctx *hcl.Eval t.Name = b.Labels[0] c.Tasks = append(c.Tasks, t) } + } else if b.Type == secretLabel { + t := &api.Secret{} + diags = append(diags, hclDecoder.DecodeBody(b.Body, ctx, t)...) + if len(b.Labels) == 1 { + t.Name = b.Labels[0] + c.Secrets = append(c.Secrets, t) + } } } @@ -277,6 +286,7 @@ func (c *jobConfig) decodeJob(content *hcl.BodyContent, ctx *hcl.EvalContext) hc extra, remain, mdiags := body.PartialContent(&hcl.BodySchema{ Blocks: []hcl.BlockHeaderSchema{ {Type: "vault"}, + {Type: "secret", LabelNames: []string{"name"}}, {Type: "task", LabelNames: []string{"name"}}, }, }) diff --git a/nomad/job_endpoint_hooks.go b/nomad/job_endpoint_hooks.go index e32781481..b1788dc2e 100644 --- a/nomad/job_endpoint_hooks.go +++ b/nomad/job_endpoint_hooks.go @@ -279,6 +279,8 @@ func (jobImpliedConstraints) Mutate(j *structs.Job) (*structs.Job, []error, erro taskScheduleTaskGroups := j.RequiredScheduleTask() + secretBlocks := j.Secrets() + // Hot path where none of our things require constraints. // // [UPDATE THIS] if you are adding a new constraint thing! @@ -286,7 +288,8 @@ func (jobImpliedConstraints) Mutate(j *structs.Job) (*structs.Job, []error, erro nativeServiceDisco.Empty() && len(consulServiceDisco) == 0 && numaTaskGroups.Empty() && bridgeNetworkingTaskGroups.Empty() && transparentProxyTaskGroups.Empty() && - taskScheduleTaskGroups.Empty() { + taskScheduleTaskGroups.Empty() && + len(secretBlocks) == 0 { return j, nil, nil } @@ -302,6 +305,12 @@ func (jobImpliedConstraints) Mutate(j *structs.Job) (*structs.Job, []error, erro mutateConstraint(constraintMatcherLeft, tg, vaultConstraintFn(vaultBlock)) } + for _, secretProviders := range secretBlocks { + for _, provider := range secretProviders { + mutateConstraint(constraintMatcherLeft, tg, secretsConstraintFn(provider)) + } + } + // If the task group utilizes NUMA resources, run the mutator. if numaTaskGroups.Contains(tg.Name) { mutateConstraint(constraintMatcherFull, tg, numaVersionConstraint) @@ -382,6 +391,17 @@ func vaultConstraintFn(vault *structs.Vault) *structs.Constraint { return vaultConstraint } +// secretsConstraintFn returns a constraint that checks for the existence of a +// fingerprinted secrets plugin. This is to support upgrades to 1.11 where a nomad +// server follower may not be upgraded yet and attempt to place a job on a client +// that has not been upgraded. This should be removed in Nomad 1.14. +func secretsConstraintFn(provider string) *structs.Constraint { + return &structs.Constraint{ + LTarget: fmt.Sprintf("${attr.plugins.secrets.%s.version}", provider), + Operand: structs.ConstraintAttributeIsSet, + } +} + // consulConstraintFn returns a service discovery constraint that matches the // fingerprint of the requested Consul cluster. 
This is to support Nomad // Enterprise but neither the fingerprint or non-default cluster are allowed diff --git a/nomad/job_endpoint_hooks_test.go b/nomad/job_endpoint_hooks_test.go index d0e83c8a1..2e4f8417c 100644 --- a/nomad/job_endpoint_hooks_test.go +++ b/nomad/job_endpoint_hooks_test.go @@ -1280,6 +1280,122 @@ func Test_jobImpliedConstraints_Mutate(t *testing.T) { expectedOutputWarnings: nil, expectedOutputError: nil, }, + { + name: "task with secret", + inputJob: &structs.Job{ + Name: "example", + TaskGroups: []*structs.TaskGroup{ + { + Name: "group-with-secret", + Tasks: []*structs.Task{ + { + Name: "task-with-secret", + Secrets: []*structs.Secret{ + { + Provider: "test", + }, + }, + }, + }, + }, + }, + }, + expectedOutputJob: &structs.Job{ + Name: "example", + TaskGroups: []*structs.TaskGroup{ + { + Name: "group-with-secret", + Tasks: []*structs.Task{ + { + Name: "task-with-secret", + Secrets: []*structs.Secret{ + { + Provider: "test", + }, + }, + }, + }, + Constraints: []*structs.Constraint{ + { + LTarget: "${attr.plugins.secrets.test.version}", + Operand: structs.ConstraintAttributeIsSet, + }, + }, + }, + }, + }, + }, + { + name: "tasks with overlapping secrets", + inputJob: &structs.Job{ + Name: "example", + TaskGroups: []*structs.TaskGroup{ + { + Name: "group-with-secret", + Tasks: []*structs.Task{ + { + Name: "task-with-secret", + Secrets: []*structs.Secret{ + { + Provider: "foo", + }, + }, + }, + { + Name: "task-with-secret", + Secrets: []*structs.Secret{ + { + Provider: "foo", + }, + { + Provider: "bar", + }, + }, + }, + }, + }, + }, + }, + expectedOutputJob: &structs.Job{ + Name: "example", + TaskGroups: []*structs.TaskGroup{ + { + Name: "group-with-secret", + Tasks: []*structs.Task{ + { + Name: "task-with-secret", + Secrets: []*structs.Secret{ + { + Provider: "foo", + }, + }, + }, + { + Name: "task-with-secret", + Secrets: []*structs.Secret{ + { + Provider: "foo", + }, + { + Provider: "bar", + }, + }, + }, + }, + Constraints: []*structs.Constraint{ + { + LTarget: "${attr.plugins.secrets.foo.version}", + Operand: structs.ConstraintAttributeIsSet, + }, + { + LTarget: "${attr.plugins.secrets.bar.version}", + Operand: structs.ConstraintAttributeIsSet, + }, + }, + }, + }, + }, + }, } for _, tc := range testCases { diff --git a/nomad/structs/diff.go b/nomad/structs/diff.go index bb1755436..8a73398de 100644 --- a/nomad/structs/diff.go +++ b/nomad/structs/diff.go @@ -537,6 +537,11 @@ func (t *Task) Diff(other *Task, contextual bool) (*TaskDiff, error) { diff.Objects = append(diff.Objects, vDiff) } + secDiffs := secretsDiffs(t.Secrets, other.Secrets, contextual) + if secDiffs != nil { + diff.Objects = append(diff.Objects, secDiffs...) + } + // Consul diff consulDiff := primitiveObjectDiff(t.Consul, other.Consul, nil, "Consul", contextual) if consulDiff != nil { @@ -578,6 +583,61 @@ func (t *Task) Diff(other *Task, contextual bool) (*TaskDiff, error) { return diff, nil } +func secretsDiff(old, new *Secret, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Secret"} + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &Secret{} + diff.Type = DiffTypeAdded + } else if new == nil { + new = &Secret{} + diff.Type = DiffTypeDeleted + } else { + diff.Type = DiffTypeEdited + } + + // Diff the primitive fields. 
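The diff support that starts here (and continues with `secretsDiffs` just below) matches old and new secrets by their `Name`, so a renamed secret shows up as a delete plus an add rather than an edit. A standalone sketch of that pairing, with a plain struct comparison standing in for the flatmap-based field diffing:

```go
// Standalone sketch of how secretsDiffs pairs old and new secrets by Name
// before delegating to secretsDiff. Local types, illustration only.
package main

import "fmt"

type Secret struct {
	Name, Provider, Path string
}

func classify(old, new []*Secret) {
	oldM := map[string]*Secret{}
	newM := map[string]*Secret{}
	for _, s := range old {
		oldM[s.Name] = s
	}
	for _, s := range new {
		newM[s.Name] = s
	}

	for name, o := range oldM {
		switch n, ok := newM[name]; {
		case !ok:
			fmt.Println("deleted:", name)
		case *o != *n:
			fmt.Println("edited:", name)
		default:
			fmt.Println("unchanged:", name)
		}
	}
	for name := range newM {
		if _, ok := oldM[name]; !ok {
			fmt.Println("added:", name)
		}
	}
}

func main() {
	classify(
		[]*Secret{{Name: "db", Provider: "vault", Path: "secret/db"}},
		[]*Secret{
			{Name: "db", Provider: "vault", Path: "secret/db-v2"}, // edited
			{Name: "api", Provider: "nomad", Path: "kv/api"},      // added
		},
	)
}
```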
+ oldPrimitiveFlat := flatmap.Flatten(old, nil, false) + newPrimitiveFlat := flatmap.Flatten(new, nil, false) + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + return diff +} + +// secretsDiffs diffs a set of secrets. The comparator for whether a secret +// is new/edited/deleted is the secret Name field. +func secretsDiffs(old, new []*Secret, contextual bool) []*ObjectDiff { + var diffs []*ObjectDiff + + oldMap := map[string]*Secret{} + newMap := map[string]*Secret{} + + for _, o := range old { + oldMap[o.Name] = o + } + for _, n := range new { + newMap[n.Name] = n + } + + for k, v := range oldMap { + if diff := secretsDiff(v, newMap[k], contextual); diff != nil { + diffs = append(diffs, diff) + } + } + for k, v := range newMap { + // diff any newly added secrets + if _, ok := oldMap[k]; !ok { + if diff := secretsDiff(nil, v, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + + return diffs +} + func actionDiff(old, new *Action, contextual bool) *ObjectDiff { diff := &ObjectDiff{Type: DiffTypeNone, Name: "Action"} var oldPrimitiveFlat, newPrimitiveFlat map[string]string diff --git a/nomad/structs/diff_test.go b/nomad/structs/diff_test.go index fb845f96a..eeeede38b 100644 --- a/nomad/structs/diff_test.go +++ b/nomad/structs/diff_test.go @@ -9466,6 +9466,174 @@ func TestTaskDiff(t *testing.T) { }, }, }, + { + Name: "Secret edited", + Old: &Task{ + Secrets: []*Secret{ + { + Name: "foo", + Provider: "bar", + Path: "/foo/bar", + Config: map[string]any{ + "foo": "bar", + }, + Env: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + New: &Task{ + Secrets: []*Secret{ + { + Name: "foo", + Provider: "bar1", + Path: "/foo/bar1", + Config: map[string]any{ + "foo": "bar1", + }, + Env: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + Expected: &TaskDiff{ + Type: DiffTypeEdited, + Objects: []*ObjectDiff{ + { + Type: DiffTypeEdited, + Name: "Secret", + Fields: []*FieldDiff{ + { + Type: DiffTypeEdited, + Name: "Config[foo]", + Old: "bar", + New: "bar1", + }, + { + Type: DiffTypeEdited, + Name: "Path", + Old: "/foo/bar", + New: "/foo/bar1", + }, + { + Type: DiffTypeEdited, + Name: "Provider", + Old: "bar", + New: "bar1", + }, + }, + }, + }, + }, + }, + { + Name: "Secret added", + Old: &Task{ + Secrets: []*Secret{}, + }, + New: &Task{ + Secrets: []*Secret{ + { + Name: "foo", + Provider: "bar", + Path: "/foo/bar", + Config: map[string]any{ + "foo": "bar", + }, + }, + }, + }, + Expected: &TaskDiff{ + Type: DiffTypeEdited, + Objects: []*ObjectDiff{ + { + Type: DiffTypeAdded, + Name: "Secret", + Fields: []*FieldDiff{ + { + Type: DiffTypeAdded, + Name: "Config[foo]", + Old: "", + New: "bar", + }, + { + Type: DiffTypeAdded, + Name: "Name", + Old: "", + New: "foo", + }, + { + Type: DiffTypeAdded, + Name: "Path", + Old: "", + New: "/foo/bar", + }, + { + Type: DiffTypeAdded, + Name: "Provider", + Old: "", + New: "bar", + }, + }, + }, + }, + }, + }, + { + Name: "Secret deleted", + Old: &Task{ + Secrets: []*Secret{ + { + Name: "foo", + Provider: "bar", + Path: "/foo/bar", + Config: map[string]any{ + "foo": "bar", + }, + }, + }, + }, + New: &Task{ + Secrets: []*Secret{}, + }, + Expected: &TaskDiff{ + Type: DiffTypeEdited, + Objects: []*ObjectDiff{ + { + Type: DiffTypeDeleted, + Name: "Secret", + Fields: []*FieldDiff{ + { + Type: DiffTypeDeleted, + Name: "Config[foo]", + Old: "bar", + New: "", + }, + { + Type: DiffTypeDeleted, + Name: "Name", + Old: "foo", + New: "", + }, + { + Type: DiffTypeDeleted, + Name: "Path", + Old: 
"/foo/bar", + New: "", + }, + { + Type: DiffTypeDeleted, + Name: "Provider", + Old: "bar", + New: "", + }, + }, + }, + }, + }, + }, } for _, c := range cases { diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index f906c8cee..25769f421 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -219,11 +219,17 @@ const ( RateMetricRead = "read" RateMetricList = "list" RateMetricWrite = "write" + + // Vault secret provider used in task validation + SecretProviderVault = "vault" ) var ( // validNamespaceName is used to validate a namespace name validNamespaceName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$") + + // validSecretName is used to validate a secret name + validSecretName = regexp.MustCompile("^[a-zA-Z0-9_]{1,128}$") ) // NamespacedID is a tuple of an ID and a namespace @@ -5091,6 +5097,33 @@ func (j *Job) Vault() map[string]map[string]*Vault { return blocks } +// Secrets returns the set of secrets per task group, per task +func (j *Job) Secrets() map[string][]string { + blocks := make(map[string][]string, len(j.TaskGroups)) + + for _, tg := range j.TaskGroups { + secrets := []string{} + + for _, task := range tg.Tasks { + if len(task.Secrets) == 0 { + continue + } + + for _, s := range task.Secrets { + if !slices.Contains(secrets, s.Provider) { + secrets = append(secrets, s.Provider) + } + } + } + + if len(secrets) != 0 { + blocks[tg.Name] = secrets + } + } + + return blocks +} + // ConnectTasks returns the set of Consul Connect enabled tasks defined on the // job that will require a Service Identity token in the case that Consul ACLs // are enabled. The TaskKind.Value is the name of the Consul service. @@ -7787,6 +7820,9 @@ type Task struct { // have access to. Vault *Vault + // List of secrets for the task. + Secrets []*Secret + // Consul configuration specific to this task. If uset, falls back to the // group's Consul field. Consul *Consul @@ -8286,6 +8322,23 @@ func (t *Task) Validate(jobType string, tg *TaskGroup) error { } } + secrets := make(map[string]bool) + for _, s := range t.Secrets { + if _, ok := secrets[s.Name]; ok { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Duplicate secret %q found", s.Name)) + } else { + secrets[s.Name] = true + } + + if s.Provider == SecretProviderVault && t.Vault == nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Secret %q has provider \"vault\" but no vault block", s.Name)) + } + + if err := s.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Secret %q is invalid: %w", s.Name, err)) + } + } + return mErr.ErrorOrNil() } @@ -10383,6 +10436,102 @@ func (v *Vault) Validate() error { return mErr.ErrorOrNil() } +type Secret struct { + Name string + Provider string + Path string + Config map[string]any + Env map[string]string +} + +func (s *Secret) Equal(o *Secret) bool { + if s == nil || o == nil { + return s == o + } + + switch { + case s.Name != o.Name: + return false + case s.Provider != o.Provider: + return false + case s.Path != o.Path: + return false + case !maps.Equal(s.Config, o.Config): + return false + case !maps.Equal(s.Env, o.Env): + return false + } + + return true +} + +func (s *Secret) Copy() *Secret { + if s == nil { + return nil + } + + confCopy, err := copystructure.Copy(s.Config) + if err != nil { + // The default Copy() implementation should not return + // an error, so we should not reach this code path. 
+ panic(err.Error()) + } + + return &Secret{ + Name: s.Name, + Provider: s.Provider, + Path: s.Path, + Config: confCopy.(map[string]any), + Env: maps.Clone(s.Env), + } +} + +func (s *Secret) Validate() error { + if s == nil { + return nil + } + + var mErr multierror.Error + + if s.Name == "" { + _ = multierror.Append(&mErr, errors.New("secret name cannot be empty")) + } + + if !validSecretName.MatchString(s.Name) { + _ = multierror.Append(&mErr, fmt.Errorf("secret name must match regex %s", validSecretName)) + } + + if s.Provider == "" { + _ = multierror.Append(&mErr, errors.New("secret provider cannot be empty")) + } + + if s.Path == "" { + _ = multierror.Append(&mErr, errors.New("secret path cannot be empty")) + } + + if s.Provider == "nomad" || s.Provider == "vault" { + if len(s.Env) > 0 { + _ = multierror.Append(&mErr, fmt.Errorf("%s provider cannot use the env block", s.Provider)) + } + } else { + if len(s.Config) > 0 { + _ = multierror.Append(&mErr, fmt.Errorf("custom plugin provider %s cannot use the config block", s.Provider)) + } + } + + return mErr.ErrorOrNil() +} + +func (s *Secret) Canonicalize() { + if s == nil { + return + } + + if len(s.Config) == 0 { + s.Config = nil + } +} + const ( // DeploymentStatuses are the various states a deployment can be be in DeploymentStatusRunning = "running" diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index 66b8879e0..50b849ab7 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -4,6 +4,7 @@ package structs import ( + "errors" "fmt" "net" "os" @@ -6459,6 +6460,186 @@ func TestVault_Canonicalize(t *testing.T) { require.Equal(t, VaultChangeModeRestart, v.ChangeMode) } +func TestTask_Validate_Secret(t *testing.T) { + cases := []struct { + name string + task *Task + expErr bool + }{ + { + name: "errors with vault provider and no vault block", + task: &Task{ + Secrets: []*Secret{ + { + Name: "test", + Provider: "vault", + }, + }, + }, + expErr: true, + }, + { + name: "succeeds with vault provider and vault block", + task: &Task{ + Vault: &Vault{}, + Secrets: []*Secret{ + { + Name: "test", + Provider: "vault", + }, + }, + }, + expErr: false, + }, + } + + vaultProviderErr := "has provider \"vault\" but no vault block" + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + err := tc.task.Validate(JobTypeService, &TaskGroup{}) + + // Validate will return errors here, we just want to validate + // it contains the above vaultProviderErr or not + if tc.expErr { + must.ErrorContains(t, err, vaultProviderErr) + } else { + // no ErrorNotContains so use string matching + must.StrNotContains(t, err.Error(), vaultProviderErr) + } + }) + } +} + +func TestSecrets_Copy(t *testing.T) { + ci.Parallel(t) + s := &Secret{ + Name: "test-secret", + Provider: "test-provider", + Path: "/test/path", + Config: map[string]any{ + "some-key": map[string]any{ + "nested-key": "nested-value", + }, + }, + } + ns := s.Copy() + + must.Eq(t, s.Name, ns.Name) + must.Eq(t, s.Provider, ns.Provider) + must.Eq(t, s.Path, ns.Path) + must.Eq(t, s.Config, ns.Config) + + // make sure nested maps are copied correctly + s.Config["some-key"].(map[string]any)["nested-key"] = "new-value" + + must.NotEq(t, s.Config, ns.Config) +} + +func TestSecrets_Validate(t *testing.T) { + ci.Parallel(t) + testCases := []struct { + name string + secret *Secret + expectErr error + }{ + { + name: "valid secret", + secret: &Secret{ + Name: "testsecret", + Provider: "test-provier", + Path: "test-path", + }, + expectErr: nil, + }, + { + name: 
"missing name", + secret: &Secret{ + Path: "test-path", + Provider: "test-provider", + }, + expectErr: errors.New("secret name cannot be empty"), + }, + { + name: "missing provider", + secret: &Secret{ + Name: "testsecret", + Path: "test-path", + }, + expectErr: errors.New("secret provider cannot be empty"), + }, + { + name: "missing path", + secret: &Secret{ + Name: "testsecret", + Provider: "test-provier", + }, + expectErr: errors.New("secret path cannot be empty"), + }, + { + name: "nomad provider fails with env", + secret: &Secret{ + Name: "test-secret", + Provider: "nomad", + Path: "test", + Env: map[string]string{ + "test": "test", + }, + }, + expectErr: errors.New("nomad provider cannot use the env block"), + }, + { + name: "vault provider fails with env", + secret: &Secret{ + Name: "test-secret", + Provider: "vault", + Path: "test", + Env: map[string]string{ + "test": "test", + }, + }, + expectErr: errors.New("vault provider cannot use the env block"), + }, + { + name: "custom provider fails with config", + secret: &Secret{ + Name: "test-secret", + Provider: "test", + Path: "test", + Config: map[string]any{ + "test": "test", + }, + }, + expectErr: errors.New("custom plugin provider test cannot use the config block"), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := tc.secret.Validate() + if tc.expectErr != nil { + must.ErrorContains(t, err, tc.expectErr.Error()) + } else { + must.NoError(t, err) + } + }) + } + +} + +func TestSecrets_Canonicalize(t *testing.T) { + ci.Parallel(t) + s := &Secret{ + Name: "test-secret", + Provider: "test-provider", + Path: "/test/path", + Config: make(map[string]any), + } + + s.Canonicalize() + + must.Nil(t, s.Config) +} + func TestParameterizedJobConfig_Validate(t *testing.T) { ci.Parallel(t) diff --git a/scheduler/feasible/feasible.go b/scheduler/feasible/feasible.go index 722a982b0..974f5f234 100644 --- a/scheduler/feasible/feasible.go +++ b/scheduler/feasible/feasible.go @@ -35,6 +35,7 @@ const ( FilterConstraintCSIVolumeGCdAllocationTemplate = "CSI volume %s has exhausted its available writer claims and is claimed by a garbage collected allocation %s; waiting for claim to be released" FilterConstraintDrivers = "missing drivers" FilterConstraintDevices = "missing devices" + FilterConstraintSecrets = "missing secrets provider" FilterConstraintsCSIPluginTopology = "did not meet topology requirement" ) @@ -161,6 +162,48 @@ func NewRandomIterator(ctx Context, nodes []*structs.Node) *StaticIterator { return NewStaticIterator(ctx, nodes) } +type SecretsProviderChecker struct { + ctx Context + secrets map[string]struct{} +} + +func NewSecretsProviderChecker(ctx Context, secrets map[string]struct{}) *SecretsProviderChecker { + return &SecretsProviderChecker{ + ctx: ctx, + secrets: secrets, + } +} + +func (s *SecretsProviderChecker) SetSecrets(secrets map[string]struct{}) { + s.secrets = secrets + +} + +func (s *SecretsProviderChecker) Feasible(option *structs.Node) bool { + // Use this node if possible + if s.hasSecrets(option) { + return true + } + s.ctx.Metrics().FilterNode(option, FilterConstraintSecrets) + return false +} + +// hasSecrets is used to check if the node has all the appropriate +// secrets provider for this task group. Secrets providers are registered +// as node attributes "secrets.plugin.*.version". 
+func (s *SecretsProviderChecker) hasSecrets(option *structs.Node) bool { + for secret := range s.secrets { + secretStr := fmt.Sprintf("plugins.secrets.%s.version", secret) + + _, ok := option.Attributes[secretStr] + if !ok { + return false + } + } + + return true +} + // HostVolumeChecker is a FeasibilityChecker which returns whether a node has // the host volumes necessary to schedule a task group. type HostVolumeChecker struct { diff --git a/scheduler/feasible/feasible_test.go b/scheduler/feasible/feasible_test.go index 2947a47e0..a307a60dc 100644 --- a/scheduler/feasible/feasible_test.go +++ b/scheduler/feasible/feasible_test.go @@ -88,6 +88,59 @@ func TestRandomIterator(t *testing.T) { } +func TestSecretsChecker(t *testing.T) { + ci.Parallel(t) + + _, ctx := MockContext(t) + nodes := []*structs.Node{ + mock.Node(), + mock.Node(), + mock.Node(), + } + nodes[1].Attributes["plugins.secrets.foo.version"] = "1.0.0" + nodes[2].Attributes["plugins.secrets.bar.version"] = "1.0.0" + + m := map[string]struct{}{ + "foo": struct{}{}, + } + checker := NewSecretsProviderChecker(ctx, m) + + cases := []struct { + Node *structs.Node + Secrets map[string]struct{} + Result bool + }{ + { + Node: nodes[0], + Secrets: map[string]struct{}{ + "foo": {}, + }, + Result: false, + }, + { + Node: nodes[1], + Secrets: map[string]struct{}{ + "foo": {}, + }, + Result: true, + }, + { + Node: nodes[2], + Secrets: map[string]struct{}{ + "foo": {}, + }, + Result: false, + }, + } + + for i, c := range cases { + checker.SetSecrets(c.Secrets) + if act := checker.Feasible(c.Node); act != c.Result { + t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result) + } + } +} + func TestHostVolumeChecker_Static(t *testing.T) { ci.Parallel(t) diff --git a/scheduler/feasible/stack.go b/scheduler/feasible/stack.go index 1dd0b17a8..9941cbe97 100644 --- a/scheduler/feasible/stack.go +++ b/scheduler/feasible/stack.go @@ -61,6 +61,7 @@ type GenericStack struct { taskGroupHostVolumes *HostVolumeChecker taskGroupCSIVolumes *CSIVolumeChecker taskGroupNetwork *NetworkChecker + taskGroupSecrets *SecretsProviderChecker distinctHostsConstraint *DistinctHostsIterator distinctPropertyConstraint *DistinctPropertyIterator @@ -164,6 +165,7 @@ func (s *GenericStack) Select(tg *structs.TaskGroup, options *SelectOptions) *Ra if len(tg.Networks) > 0 { s.taskGroupNetwork.SetNetwork(tg.Networks[0]) } + s.taskGroupSecrets.SetSecrets(tgConstr.Secrets) s.distinctHostsConstraint.SetTaskGroup(tg) s.distinctPropertyConstraint.SetTaskGroup(tg) s.wrappedChecks.SetTaskGroup(tg.Name) @@ -218,6 +220,7 @@ type SystemStack struct { taskGroupHostVolumes *HostVolumeChecker taskGroupCSIVolumes *CSIVolumeChecker taskGroupNetwork *NetworkChecker + taskGroupSecrets *SecretsProviderChecker distinctPropertyConstraint *DistinctPropertyIterator binPack *BinPackIterator @@ -258,6 +261,9 @@ func NewSystemStack(sysbatch bool, ctx Context) *SystemStack { // Filter on available client networks s.taskGroupNetwork = NewNetworkChecker(ctx) + // Filter on task group secrets + s.taskGroupSecrets = NewSecretsProviderChecker(ctx, nil) + // Create the feasibility wrapper which wraps all feasibility checks in // which feasibility checking can be skipped if the computed node class has // previously been marked as eligible or ineligible. 
Generally this will be @@ -268,6 +274,7 @@ func NewSystemStack(sysbatch bool, ctx Context) *SystemStack { s.taskGroupConstraint, s.taskGroupDevices, s.taskGroupNetwork, + s.taskGroupSecrets, } avail := []FeasibilityChecker{ s.taskGroupHostVolumes, @@ -359,6 +366,7 @@ func (s *SystemStack) Select(tg *structs.TaskGroup, options *SelectOptions) *Ran if len(tg.Networks) > 0 { s.taskGroupNetwork.SetNetwork(tg.Networks[0]) } + s.taskGroupSecrets.SetSecrets(tgConstr.Secrets) s.wrappedChecks.SetTaskGroup(tg.Name) s.distinctPropertyConstraint.SetTaskGroup(tg) s.binPack.SetTaskGroup(tg) @@ -409,6 +417,9 @@ func NewGenericStack(batch bool, ctx Context) *GenericStack { // Filter on available client networks s.taskGroupNetwork = NewNetworkChecker(ctx) + // Filter on task group secrets + s.taskGroupSecrets = NewSecretsProviderChecker(ctx, nil) + // Create the feasibility wrapper which wraps all feasibility checks in // which feasibility checking can be skipped if the computed node class has // previously been marked as eligible or ineligible. Generally this will be @@ -419,6 +430,7 @@ func NewGenericStack(batch bool, ctx Context) *GenericStack { s.taskGroupConstraint, s.taskGroupDevices, s.taskGroupNetwork, + s.taskGroupSecrets, } avail := []FeasibilityChecker{ s.taskGroupHostVolumes, @@ -480,12 +492,17 @@ func TaskGroupConstraints(tg *structs.TaskGroup) TgConstrainTuple { c := TgConstrainTuple{ Constraints: make([]*structs.Constraint, 0, len(tg.Constraints)), Drivers: make(map[string]struct{}), + Secrets: make(map[string]struct{}), } c.Constraints = append(c.Constraints, tg.Constraints...) for _, task := range tg.Tasks { c.Drivers[task.Driver] = struct{}{} c.Constraints = append(c.Constraints, task.Constraints...) + + for _, s := range task.Secrets { + c.Secrets[s.Provider] = struct{}{} + } } return c @@ -498,4 +515,7 @@ type TgConstrainTuple struct { // The set of required Drivers within the task group. 
Drivers map[string]struct{} + + // The secret providers being utilized by the task group + Secrets map[string]struct{} } diff --git a/scheduler/feasible/stack_test.go b/scheduler/feasible/stack_test.go index 6f736ac3b..b44f8a772 100644 --- a/scheduler/feasible/stack_test.go +++ b/scheduler/feasible/stack_test.go @@ -220,6 +220,36 @@ func TestServiceStack_Select_DriverFilter(t *testing.T) { must.Eq(t, zero, node.Node) } +func TestServiceStack_Select_Secrets(t *testing.T) { + ci.Parallel(t) + + _, ctx := MockContext(t) + nodes := []*structs.Node{ + mock.Node(), + mock.Node(), + } + zero := nodes[0] + zero.Attributes["plugins.secrets.foo.version"] = "0.0.1" + must.NoError(t, zero.ComputeClass()) + + stack := NewGenericStack(false, ctx) + stack.SetNodes(nodes) + + job := mock.Job() + job.TaskGroups[0].Tasks[0].Secrets = []*structs.Secret{ + { + Provider: "foo", + }, + } + stack.SetJob(job) + + selectOptions := &SelectOptions{} + node := stack.Select(job.TaskGroups[0], selectOptions) + must.NotNil(t, node, must.Sprintf("missing node %#v", ctx.Metrics())) + + must.Eq(t, zero, node.Node) +} + func TestServiceStack_Select_HostVolume(t *testing.T) { ci.Parallel(t) @@ -571,6 +601,44 @@ func TestSystemStack_Select_DriverFilter(t *testing.T) { must.Nil(t, node) } +func TestSystemStack_Select_Secrets(t *testing.T) { + ci.Parallel(t) + + _, ctx := MockContext(t) + nodes := []*structs.Node{ + mock.Node(), + mock.Node(), + } + zero := nodes[0] + zero.Attributes["plugins.secrets.foo.version"] = "0.0.1" + must.NoError(t, zero.ComputeClass()) + + stack := NewSystemStack(false, ctx) + stack.SetNodes(nodes) + + job := mock.Job() + job.TaskGroups[0].Tasks[0].Secrets = []*structs.Secret{ + { + Provider: "foo", + }, + } + stack.SetJob(job) + + selectOptions := &SelectOptions{} + node := stack.Select(job.TaskGroups[0], selectOptions) + must.NotNil(t, node, must.Sprintf("missing node %#v", ctx.Metrics())) + must.Eq(t, zero, node.Node) + + delete(zero.Attributes, "plugins.secrets.foo.version") + must.NoError(t, zero.ComputeClass()) + + stack = NewSystemStack(false, ctx) + stack.SetNodes(nodes) + stack.SetJob(job) + node = stack.Select(job.TaskGroups[0], selectOptions) + must.Nil(t, node) +} + func TestSystemStack_Select_ConstraintFilter(t *testing.T) { ci.Parallel(t) diff --git a/scheduler/util.go b/scheduler/util.go index 8fadb59db..9d7b03cb4 100644 --- a/scheduler/util.go +++ b/scheduler/util.go @@ -230,6 +230,9 @@ func tasksUpdated(jobA, jobB *structs.Job, taskGroup string) comparison { if !at.Vault.Equal(bt.Vault) { return difference("task vault", at.Vault, bt.Vault) } + if !slices.EqualFunc(at.Secrets, bt.Secrets, func(a, b *structs.Secret) bool { return a.Equal(b) }) { + return difference("task secrets", at.Secrets, bt.Secrets) + } if c := consulUpdated(at.Consul, bt.Consul); c.modified { return c } diff --git a/scheduler/util_test.go b/scheduler/util_test.go index 490fa35c3..bc2781583 100644 --- a/scheduler/util_test.go +++ b/scheduler/util_test.go @@ -440,6 +440,22 @@ func TestTasksUpdated(t *testing.T) { j32.TaskGroups[0].Tasks[0].VolumeMounts = nil must.True(t, tasksUpdated(j31, j32, name).modified) + + j33 := mock.Job() + j33 = j32.Copy() + + must.False(t, tasksUpdated(j32, j33, name).modified) + + // Add a task secret + j33.TaskGroups[0].Tasks[0].Secrets = append(j32.TaskGroups[0].Tasks[0].Secrets, + &structs.Secret{ + Name: "mysecret", + Provider: "nomad", + Path: "/my/path", + }) + + must.True(t, tasksUpdated(j32, j33, name).modified) + } func TestTasksUpdated_connectServiceUpdated(t *testing.T) {
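On the scheduler side, `SecretsProviderChecker` filters nodes the same way the implied constraint does: every provider collected into `TgConstrainTuple.Secrets` must appear on the node as a fingerprinted `plugins.secrets.<provider>.version` attribute (that key, with `plugins` first, is what the checker, the stack tests, and the implied constraint all use). A standalone sketch of the check:

```go
// Sketch of the per-node feasibility check performed by hasSecrets: a node is
// feasible only if it exposes a version attribute for every required provider.
// Local stand-in types, illustration only.
package main

import "fmt"

type Node struct {
	Name       string
	Attributes map[string]string
}

func feasible(node *Node, providers map[string]struct{}) bool {
	for p := range providers {
		if _, ok := node.Attributes[fmt.Sprintf("plugins.secrets.%s.version", p)]; !ok {
			return false
		}
	}
	return true
}

func main() {
	need := map[string]struct{}{"vault": {}}
	with := &Node{Name: "a", Attributes: map[string]string{"plugins.secrets.vault.version": "0.0.1"}}
	without := &Node{Name: "b", Attributes: map[string]string{}}
	fmt.Println(feasible(with, need), feasible(without, need)) // true false
}
```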
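Finally, the `tasksUpdated` change makes secret edits destructive: the old and new secret slices are compared element-wise with `slices.EqualFunc` and `Secret.Equal`, so adding, removing, editing, or reordering a secret marks the task as modified. A small sketch with a local copy of the `Equal` comparison:

```go
// Sketch of the update check added to tasksUpdated. Local stand-in types;
// Equal mirrors the field-by-field comparison on structs.Secret above.
package main

import (
	"fmt"
	"maps"
	"slices"
)

type Secret struct {
	Name, Provider, Path string
	Config               map[string]any
	Env                  map[string]string
}

func (s *Secret) Equal(o *Secret) bool {
	if s == nil || o == nil {
		return s == o
	}
	return s.Name == o.Name &&
		s.Provider == o.Provider &&
		s.Path == o.Path &&
		maps.Equal(s.Config, o.Config) &&
		maps.Equal(s.Env, o.Env)
}

func main() {
	old := []*Secret{{Name: "db", Provider: "nomad", Path: "kv/db"}}
	updated := []*Secret{{Name: "db", Provider: "nomad", Path: "kv/db-v2"}}

	same := slices.EqualFunc(old, updated, func(a, b *Secret) bool { return a.Equal(b) })
	fmt.Println(same) // false: the path changed, so tasksUpdated reports a modification
}
```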