artifact: add client toggle to disable filesystem isolation (#15503)

This PR adds the client config option for turning off filesystem isolation,
applicable on Linux systems where filesystem isolation is possible and
enabled by default.

```hcl
client {
  artifact {
    disable_filesystem_isolation = <bool:false>
  }
}
```

Closes #15496
This commit is contained in:
Seth Hoenig
2022-12-08 12:29:23 -06:00
committed by GitHub
parent 6732761d91
commit 990537e8ba
10 changed files with 216 additions and 177 deletions

View File

@@ -21,12 +21,13 @@ import (
// e.g. https://www.opencve.io/cve/CVE-2022-41716
type parameters struct {
// Config
HTTPReadTimeout time.Duration `json:"http_read_timeout"`
HTTPMaxBytes int64 `json:"http_max_bytes"`
GCSTimeout time.Duration `json:"gcs_timeout"`
GitTimeout time.Duration `json:"git_timeout"`
HgTimeout time.Duration `json:"hg_timeout"`
S3Timeout time.Duration `json:"s3_timeout"`
HTTPReadTimeout time.Duration `json:"http_read_timeout"`
HTTPMaxBytes int64 `json:"http_max_bytes"`
GCSTimeout time.Duration `json:"gcs_timeout"`
GitTimeout time.Duration `json:"git_timeout"`
HgTimeout time.Duration `json:"hg_timeout"`
S3Timeout time.Duration `json:"s3_timeout"`
DisableFilesystemIsolation bool `json:"disable_filesystem_isolation"`
// Artifact
Mode getter.ClientMode `json:"artifact_mode"`
@@ -85,6 +86,8 @@ func (p *parameters) Equal(o *parameters) bool {
return false
case p.S3Timeout != o.S3Timeout:
return false
case p.DisableFilesystemIsolation != o.DisableFilesystemIsolation:
return false
case p.Mode != o.Mode:
return false
case p.Source != o.Source:

View File

@@ -19,6 +19,7 @@ const paramsAsJSON = `
"git_timeout": 3000000000,
"hg_timeout": 4000000000,
"s3_timeout": 5000000000,
"disable_filesystem_isolation": true,
"artifact_mode": 2,
"artifact_source": "https://example.com/file.txt",
"artifact_destination": "local/out.txt",
@@ -29,12 +30,13 @@ const paramsAsJSON = `
}`
var paramsAsStruct = &parameters{
HTTPReadTimeout: 1 * time.Second,
HTTPMaxBytes: 2000,
GCSTimeout: 2 * time.Second,
GitTimeout: 3 * time.Second,
HgTimeout: 4 * time.Second,
S3Timeout: 5 * time.Second,
HTTPReadTimeout: 1 * time.Second,
HTTPMaxBytes: 2000,
GCSTimeout: 2 * time.Second,
GitTimeout: 3 * time.Second,
HgTimeout: 4 * time.Second,
S3Timeout: 5 * time.Second,
DisableFilesystemIsolation: true,
Mode: getter.ClientModeFile,
Source: "https://example.com/file.txt",

View File

@@ -39,20 +39,26 @@ func (s *Sandbox) Get(env interfaces.EnvReplacer, artifact *structs.TaskArtifact
dir := getTaskDir(env)
params := &parameters{
HTTPReadTimeout: s.ac.HTTPReadTimeout,
HTTPMaxBytes: s.ac.HTTPMaxBytes,
GCSTimeout: s.ac.GCSTimeout,
GitTimeout: s.ac.GitTimeout,
HgTimeout: s.ac.HgTimeout,
S3Timeout: s.ac.S3Timeout,
Mode: mode,
Source: source,
Destination: destination,
Headers: headers,
TaskDir: dir,
// downloader configuration
HTTPReadTimeout: s.ac.HTTPReadTimeout,
HTTPMaxBytes: s.ac.HTTPMaxBytes,
GCSTimeout: s.ac.GCSTimeout,
GitTimeout: s.ac.GitTimeout,
HgTimeout: s.ac.HgTimeout,
S3Timeout: s.ac.S3Timeout,
DisableFilesystemIsolation: s.ac.DisableFilesystemIsolation,
// artifact configuration
Mode: mode,
Source: source,
Destination: destination,
Headers: headers,
// task environment
TaskDir: dir,
}
if err = runCmd(params, s.logger); err != nil {
if err = s.runCmd(params); err != nil {
return err
}
return nil

View File

@@ -10,7 +10,6 @@ import (
"strings"
"github.com/hashicorp/go-getter"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/nomad/client/interfaces"
"github.com/hashicorp/nomad/helper/subproc"
"github.com/hashicorp/nomad/nomad/structs"
@@ -96,7 +95,7 @@ func getTaskDir(env interfaces.EnvReplacer) string {
return filepath.Dir(p)
}
func runCmd(env *parameters, logger hclog.Logger) error {
func (s *Sandbox) runCmd(env *parameters) error {
// find the nomad process
bin := subproc.Self()
@@ -115,13 +114,13 @@ func runCmd(env *parameters, logger hclog.Logger) error {
// start & wait for the subprocess to terminate
if err := cmd.Run(); err != nil {
subproc.Log(output, logger.Error)
subproc.Log(output, s.logger.Error)
return &Error{
URL: env.Source,
Err: fmt.Errorf("getter subprocess failed: %v", err),
Recoverable: true,
}
}
subproc.Log(output, logger.Debug)
subproc.Log(output, s.logger.Debug)
return nil
}

View File

@@ -30,10 +30,11 @@ func init() {
subproc.SetExpiration(ctx)
// sandbox the host filesystem for this process
dir := env.TaskDir
if err := lockdown(dir); err != nil {
subproc.Print("failed to sandbox getter process: %v", err)
return subproc.ExitFailure
if !env.DisableFilesystemIsolation {
if err := lockdown(env.TaskDir); err != nil {
subproc.Print("failed to sandbox %s process: %v", SubCommand, err)
return subproc.ExitFailure
}
}
// create the go-getter client

View File

@@ -18,50 +18,52 @@ type ArtifactConfig struct {
GitTimeout time.Duration
HgTimeout time.Duration
S3Timeout time.Duration
DisableFilesystemIsolation bool
}
// ArtifactConfigFromAgent creates a new internal readonly copy of the client
// agent's ArtifactConfig. The config should have already been validated.
func ArtifactConfigFromAgent(c *config.ArtifactConfig) (*ArtifactConfig, error) {
newConfig := &ArtifactConfig{}
t, err := time.ParseDuration(*c.HTTPReadTimeout)
httpReadTimeout, err := time.ParseDuration(*c.HTTPReadTimeout)
if err != nil {
return nil, fmt.Errorf("error parsing HTTPReadTimeout: %w", err)
}
newConfig.HTTPReadTimeout = t
s, err := humanize.ParseBytes(*c.HTTPMaxSize)
httpMaxSize, err := humanize.ParseBytes(*c.HTTPMaxSize)
if err != nil {
return nil, fmt.Errorf("error parsing HTTPMaxSize: %w", err)
}
newConfig.HTTPMaxBytes = int64(s)
t, err = time.ParseDuration(*c.GCSTimeout)
gcsTimeout, err := time.ParseDuration(*c.GCSTimeout)
if err != nil {
return nil, fmt.Errorf("error parsing GCSTimeout: %w", err)
}
newConfig.GCSTimeout = t
t, err = time.ParseDuration(*c.GitTimeout)
gitTimeout, err := time.ParseDuration(*c.GitTimeout)
if err != nil {
return nil, fmt.Errorf("error parsing GitTimeout: %w", err)
}
newConfig.GitTimeout = t
t, err = time.ParseDuration(*c.HgTimeout)
hgTimeout, err := time.ParseDuration(*c.HgTimeout)
if err != nil {
return nil, fmt.Errorf("error parsing HgTimeout: %w", err)
}
newConfig.HgTimeout = t
t, err = time.ParseDuration(*c.S3Timeout)
s3Timeout, err := time.ParseDuration(*c.S3Timeout)
if err != nil {
return nil, fmt.Errorf("error parsing S3Timeout: %w", err)
}
newConfig.S3Timeout = t
return newConfig, nil
return &ArtifactConfig{
HTTPReadTimeout: httpReadTimeout,
HTTPMaxBytes: int64(httpMaxSize),
GCSTimeout: gcsTimeout,
GitTimeout: gitTimeout,
HgTimeout: hgTimeout,
S3Timeout: s3Timeout,
DisableFilesystemIsolation: *c.DisableFilesystemIsolation,
}, nil
}
func (a *ArtifactConfig) Copy() *ArtifactConfig {

View File

@@ -34,65 +34,68 @@ type ArtifactConfig struct {
// S3Timeout is the duration in which an S3 operation must complete or
// it will be canceled. Defaults to 30m.
S3Timeout *string `hcl:"s3_timeout"`
// DisableFilesystemIsolation will turn off the security feature where the
// artifact downloader can write only to the task sandbox directory, and can
// read only from specific locations on the host filesystem.
DisableFilesystemIsolation *bool `hcl:"disable_filesystem_isolation"`
}
func (a *ArtifactConfig) Copy() *ArtifactConfig {
if a == nil {
return nil
}
newCopy := &ArtifactConfig{}
if a.HTTPReadTimeout != nil {
newCopy.HTTPReadTimeout = pointer.Of(*a.HTTPReadTimeout)
return &ArtifactConfig{
HTTPReadTimeout: pointer.Copy(a.HTTPReadTimeout),
HTTPMaxSize: pointer.Copy(a.HTTPMaxSize),
GCSTimeout: pointer.Copy(a.GCSTimeout),
GitTimeout: pointer.Copy(a.GitTimeout),
HgTimeout: pointer.Copy(a.HgTimeout),
S3Timeout: pointer.Copy(a.S3Timeout),
DisableFilesystemIsolation: pointer.Copy(a.DisableFilesystemIsolation),
}
if a.HTTPMaxSize != nil {
newCopy.HTTPMaxSize = pointer.Of(*a.HTTPMaxSize)
}
if a.GCSTimeout != nil {
newCopy.GCSTimeout = pointer.Of(*a.GCSTimeout)
}
if a.GitTimeout != nil {
newCopy.GitTimeout = pointer.Of(*a.GitTimeout)
}
if a.HgTimeout != nil {
newCopy.HgTimeout = pointer.Of(*a.HgTimeout)
}
if a.S3Timeout != nil {
newCopy.S3Timeout = pointer.Of(*a.S3Timeout)
}
return newCopy
}
func (a *ArtifactConfig) Merge(o *ArtifactConfig) *ArtifactConfig {
if a == nil {
switch {
case a == nil:
return o.Copy()
}
if o == nil {
case o == nil:
return a.Copy()
default:
return &ArtifactConfig{
HTTPReadTimeout: pointer.Merge(a.HTTPReadTimeout, o.HTTPReadTimeout),
HTTPMaxSize: pointer.Merge(a.HTTPMaxSize, o.HTTPMaxSize),
GCSTimeout: pointer.Merge(a.GCSTimeout, o.GCSTimeout),
GitTimeout: pointer.Merge(a.GitTimeout, o.GitTimeout),
HgTimeout: pointer.Merge(a.HgTimeout, o.HgTimeout),
S3Timeout: pointer.Merge(a.S3Timeout, o.S3Timeout),
DisableFilesystemIsolation: pointer.Merge(a.DisableFilesystemIsolation, o.DisableFilesystemIsolation),
}
}
}
newCopy := a.Copy()
if o.HTTPReadTimeout != nil {
newCopy.HTTPReadTimeout = pointer.Of(*o.HTTPReadTimeout)
func (a *ArtifactConfig) Equal(o *ArtifactConfig) bool {
if a == nil || o == nil {
return a == o
}
if o.HTTPMaxSize != nil {
newCopy.HTTPMaxSize = pointer.Of(*o.HTTPMaxSize)
switch {
case !pointer.Eq(a.HTTPReadTimeout, o.HTTPReadTimeout):
return false
case !pointer.Eq(a.HTTPMaxSize, o.HTTPMaxSize):
return false
case !pointer.Eq(a.GCSTimeout, o.GCSTimeout):
return false
case !pointer.Eq(a.GitTimeout, o.GitTimeout):
return false
case !pointer.Eq(a.HgTimeout, o.HgTimeout):
return false
case !pointer.Eq(a.S3Timeout, o.S3Timeout):
return false
case !pointer.Eq(a.DisableFilesystemIsolation, o.DisableFilesystemIsolation):
return false
}
if o.GCSTimeout != nil {
newCopy.GCSTimeout = pointer.Of(*o.GCSTimeout)
}
if o.GitTimeout != nil {
newCopy.GitTimeout = pointer.Of(*o.GitTimeout)
}
if o.HgTimeout != nil {
newCopy.HgTimeout = pointer.Of(*o.HgTimeout)
}
if o.S3Timeout != nil {
newCopy.S3Timeout = pointer.Of(*o.S3Timeout)
}
return newCopy
return true
}
func (a *ArtifactConfig) Validate() error {
@@ -154,6 +157,10 @@ func (a *ArtifactConfig) Validate() error {
return fmt.Errorf("s3_timeout must be > 0")
}
if a.DisableFilesystemIsolation == nil {
return fmt.Errorf("disable_filesystem_isolation must be set")
}
return nil
}
@@ -182,5 +189,8 @@ func DefaultArtifactConfig() *ArtifactConfig {
// Timeout for S3 operations. Must be long enough to
// accommodate large/slow downloads.
S3Timeout: pointer.Of("30m"),
// Toggle for disabling filesystem isolation, where available.
DisableFilesystemIsolation: pointer.Of(false),
}
}

View File

@@ -5,7 +5,7 @@ import (
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/pointer"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
func TestArtifactConfig_Copy(t *testing.T) {
@@ -13,13 +13,14 @@ func TestArtifactConfig_Copy(t *testing.T) {
a := DefaultArtifactConfig()
b := a.Copy()
require.Equal(t, a, b)
must.Equal(t, a, b)
must.Equal(t, b, a)
b.HTTPReadTimeout = pointer.Of("5m")
b.HTTPMaxSize = pointer.Of("2MB")
b.GitTimeout = pointer.Of("3m")
b.HgTimeout = pointer.Of("2m")
require.NotEqual(t, a, b)
must.NotEqual(t, a, b)
}
func TestArtifactConfig_Merge(t *testing.T) {
@@ -34,68 +35,75 @@ func TestArtifactConfig_Merge(t *testing.T) {
{
name: "merge all fields",
source: &ArtifactConfig{
HTTPReadTimeout: pointer.Of("30m"),
HTTPMaxSize: pointer.Of("100GB"),
GCSTimeout: pointer.Of("30m"),
GitTimeout: pointer.Of("30m"),
HgTimeout: pointer.Of("30m"),
S3Timeout: pointer.Of("30m"),
HTTPReadTimeout: pointer.Of("30m"),
HTTPMaxSize: pointer.Of("100GB"),
GCSTimeout: pointer.Of("30m"),
GitTimeout: pointer.Of("30m"),
HgTimeout: pointer.Of("30m"),
S3Timeout: pointer.Of("30m"),
DisableFilesystemIsolation: pointer.Of(false),
},
other: &ArtifactConfig{
HTTPReadTimeout: pointer.Of("5m"),
HTTPMaxSize: pointer.Of("2GB"),
GCSTimeout: pointer.Of("1m"),
GitTimeout: pointer.Of("2m"),
HgTimeout: pointer.Of("3m"),
S3Timeout: pointer.Of("4m"),
HTTPReadTimeout: pointer.Of("5m"),
HTTPMaxSize: pointer.Of("2GB"),
GCSTimeout: pointer.Of("1m"),
GitTimeout: pointer.Of("2m"),
HgTimeout: pointer.Of("3m"),
S3Timeout: pointer.Of("4m"),
DisableFilesystemIsolation: pointer.Of(true),
},
expected: &ArtifactConfig{
HTTPReadTimeout: pointer.Of("5m"),
HTTPMaxSize: pointer.Of("2GB"),
GCSTimeout: pointer.Of("1m"),
GitTimeout: pointer.Of("2m"),
HgTimeout: pointer.Of("3m"),
S3Timeout: pointer.Of("4m"),
HTTPReadTimeout: pointer.Of("5m"),
HTTPMaxSize: pointer.Of("2GB"),
GCSTimeout: pointer.Of("1m"),
GitTimeout: pointer.Of("2m"),
HgTimeout: pointer.Of("3m"),
S3Timeout: pointer.Of("4m"),
DisableFilesystemIsolation: pointer.Of(true),
},
},
{
name: "null source",
source: nil,
other: &ArtifactConfig{
HTTPReadTimeout: pointer.Of("5m"),
HTTPMaxSize: pointer.Of("2GB"),
GCSTimeout: pointer.Of("1m"),
GitTimeout: pointer.Of("2m"),
HgTimeout: pointer.Of("3m"),
S3Timeout: pointer.Of("4m"),
HTTPReadTimeout: pointer.Of("5m"),
HTTPMaxSize: pointer.Of("2GB"),
GCSTimeout: pointer.Of("1m"),
GitTimeout: pointer.Of("2m"),
HgTimeout: pointer.Of("3m"),
S3Timeout: pointer.Of("4m"),
DisableFilesystemIsolation: pointer.Of(true),
},
expected: &ArtifactConfig{
HTTPReadTimeout: pointer.Of("5m"),
HTTPMaxSize: pointer.Of("2GB"),
GCSTimeout: pointer.Of("1m"),
GitTimeout: pointer.Of("2m"),
HgTimeout: pointer.Of("3m"),
S3Timeout: pointer.Of("4m"),
HTTPReadTimeout: pointer.Of("5m"),
HTTPMaxSize: pointer.Of("2GB"),
GCSTimeout: pointer.Of("1m"),
GitTimeout: pointer.Of("2m"),
HgTimeout: pointer.Of("3m"),
S3Timeout: pointer.Of("4m"),
DisableFilesystemIsolation: pointer.Of(true),
},
},
{
name: "null other",
source: &ArtifactConfig{
HTTPReadTimeout: pointer.Of("30m"),
HTTPMaxSize: pointer.Of("100GB"),
GCSTimeout: pointer.Of("30m"),
GitTimeout: pointer.Of("30m"),
HgTimeout: pointer.Of("30m"),
S3Timeout: pointer.Of("30m"),
HTTPReadTimeout: pointer.Of("30m"),
HTTPMaxSize: pointer.Of("100GB"),
GCSTimeout: pointer.Of("30m"),
GitTimeout: pointer.Of("30m"),
HgTimeout: pointer.Of("30m"),
S3Timeout: pointer.Of("30m"),
DisableFilesystemIsolation: pointer.Of(true),
},
other: nil,
expected: &ArtifactConfig{
HTTPReadTimeout: pointer.Of("30m"),
HTTPMaxSize: pointer.Of("100GB"),
GCSTimeout: pointer.Of("30m"),
GitTimeout: pointer.Of("30m"),
HgTimeout: pointer.Of("30m"),
S3Timeout: pointer.Of("30m"),
HTTPReadTimeout: pointer.Of("30m"),
HTTPMaxSize: pointer.Of("100GB"),
GCSTimeout: pointer.Of("30m"),
GitTimeout: pointer.Of("30m"),
HgTimeout: pointer.Of("30m"),
S3Timeout: pointer.Of("30m"),
DisableFilesystemIsolation: pointer.Of(true),
},
},
}
@@ -103,7 +111,7 @@ func TestArtifactConfig_Merge(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
got := tc.source.Merge(tc.other)
require.Equal(t, tc.expected, got)
must.Equal(t, tc.expected, got)
})
}
}
@@ -112,224 +120,224 @@ func TestArtifactConfig_Validate(t *testing.T) {
ci.Parallel(t)
testCases := []struct {
name string
config func(*ArtifactConfig)
expectedError string
name string
config func(*ArtifactConfig)
expErr string
}{
{
name: "default config is valid",
config: nil,
expectedError: "",
name: "default config is valid",
config: nil,
expErr: "",
},
{
name: "missing http read timeout",
config: func(a *ArtifactConfig) {
a.HTTPReadTimeout = nil
},
expectedError: "http_read_timeout must be set",
expErr: "http_read_timeout must be set",
},
{
name: "http read timeout is invalid",
config: func(a *ArtifactConfig) {
a.HTTPReadTimeout = pointer.Of("invalid")
},
expectedError: "http_read_timeout not a valid duration",
expErr: "http_read_timeout not a valid duration",
},
{
name: "http read timeout is empty",
config: func(a *ArtifactConfig) {
a.HTTPReadTimeout = pointer.Of("")
},
expectedError: "http_read_timeout not a valid duration",
expErr: "http_read_timeout not a valid duration",
},
{
name: "http read timeout is zero",
config: func(a *ArtifactConfig) {
a.HTTPReadTimeout = pointer.Of("0")
},
expectedError: "",
expErr: "",
},
{
name: "http read timeout is negative",
config: func(a *ArtifactConfig) {
a.HTTPReadTimeout = pointer.Of("-10m")
},
expectedError: "http_read_timeout must be > 0",
expErr: "http_read_timeout must be > 0",
},
{
name: "http max size is missing",
config: func(a *ArtifactConfig) {
a.HTTPMaxSize = nil
},
expectedError: "http_max_size must be set",
expErr: "http_max_size must be set",
},
{
name: "http max size is invalid",
config: func(a *ArtifactConfig) {
a.HTTPMaxSize = pointer.Of("invalid")
},
expectedError: "http_max_size not a valid size",
expErr: "http_max_size not a valid size",
},
{
name: "http max size is empty",
config: func(a *ArtifactConfig) {
a.HTTPMaxSize = pointer.Of("")
},
expectedError: "http_max_size not a valid size",
expErr: "http_max_size not a valid size",
},
{
name: "http max size is zero",
config: func(a *ArtifactConfig) {
a.HTTPMaxSize = pointer.Of("0")
},
expectedError: "",
expErr: "",
},
{
name: "http max size is negative",
config: func(a *ArtifactConfig) {
a.HTTPMaxSize = pointer.Of("-l0MB")
},
expectedError: "http_max_size not a valid size",
expErr: "http_max_size not a valid size",
},
{
name: "gcs timeout is missing",
config: func(a *ArtifactConfig) {
a.GCSTimeout = nil
},
expectedError: "gcs_timeout must be set",
expErr: "gcs_timeout must be set",
},
{
name: "gcs timeout is invalid",
config: func(a *ArtifactConfig) {
a.GCSTimeout = pointer.Of("invalid")
},
expectedError: "gcs_timeout not a valid duration",
expErr: "gcs_timeout not a valid duration",
},
{
name: "gcs timeout is empty",
config: func(a *ArtifactConfig) {
a.GCSTimeout = pointer.Of("")
},
expectedError: "gcs_timeout not a valid duration",
expErr: "gcs_timeout not a valid duration",
},
{
name: "gcs timeout is zero",
config: func(a *ArtifactConfig) {
a.GCSTimeout = pointer.Of("0")
},
expectedError: "",
expErr: "",
},
{
name: "gcs timeout is negative",
config: func(a *ArtifactConfig) {
a.GCSTimeout = pointer.Of("-l0m")
},
expectedError: "gcs_timeout not a valid duration",
expErr: "gcs_timeout not a valid duration",
},
{
name: "git timeout is missing",
config: func(a *ArtifactConfig) {
a.GitTimeout = nil
},
expectedError: "git_timeout must be set",
expErr: "git_timeout must be set",
},
{
name: "git timeout is invalid",
config: func(a *ArtifactConfig) {
a.GitTimeout = pointer.Of("invalid")
},
expectedError: "git_timeout not a valid duration",
expErr: "git_timeout not a valid duration",
},
{
name: "git timeout is empty",
config: func(a *ArtifactConfig) {
a.GitTimeout = pointer.Of("")
},
expectedError: "git_timeout not a valid duration",
expErr: "git_timeout not a valid duration",
},
{
name: "git timeout is zero",
config: func(a *ArtifactConfig) {
a.GitTimeout = pointer.Of("0")
},
expectedError: "",
expErr: "",
},
{
name: "git timeout is negative",
config: func(a *ArtifactConfig) {
a.GitTimeout = pointer.Of("-l0m")
},
expectedError: "git_timeout not a valid duration",
expErr: "git_timeout not a valid duration",
},
{
name: "hg timeout is missing",
config: func(a *ArtifactConfig) {
a.HgTimeout = nil
},
expectedError: "hg_timeout must be set",
expErr: "hg_timeout must be set",
},
{
name: "hg timeout is invalid",
config: func(a *ArtifactConfig) {
a.HgTimeout = pointer.Of("invalid")
},
expectedError: "hg_timeout not a valid duration",
expErr: "hg_timeout not a valid duration",
},
{
name: "hg timeout is empty",
config: func(a *ArtifactConfig) {
a.HgTimeout = pointer.Of("")
},
expectedError: "hg_timeout not a valid duration",
expErr: "hg_timeout not a valid duration",
},
{
name: "hg timeout is zero",
config: func(a *ArtifactConfig) {
a.HgTimeout = pointer.Of("0")
},
expectedError: "",
expErr: "",
},
{
name: "hg timeout is negative",
config: func(a *ArtifactConfig) {
a.HgTimeout = pointer.Of("-l0m")
},
expectedError: "hg_timeout not a valid duration",
expErr: "hg_timeout not a valid duration",
},
{
name: "s3 timeout is missing",
config: func(a *ArtifactConfig) {
a.S3Timeout = nil
},
expectedError: "s3_timeout must be set",
expErr: "s3_timeout must be set",
},
{
name: "s3 timeout is invalid",
config: func(a *ArtifactConfig) {
a.S3Timeout = pointer.Of("invalid")
},
expectedError: "s3_timeout not a valid duration",
expErr: "s3_timeout not a valid duration",
},
{
name: "s3 timeout is empty",
config: func(a *ArtifactConfig) {
a.S3Timeout = pointer.Of("")
},
expectedError: "s3_timeout not a valid duration",
expErr: "s3_timeout not a valid duration",
},
{
name: "s3 timeout is zero",
config: func(a *ArtifactConfig) {
a.S3Timeout = pointer.Of("0")
},
expectedError: "",
expErr: "",
},
{
name: "s3 timeout is negative",
config: func(a *ArtifactConfig) {
a.S3Timeout = pointer.Of("-l0m")
},
expectedError: "s3_timeout not a valid duration",
expErr: "s3_timeout not a valid duration",
},
}
@@ -341,11 +349,11 @@ func TestArtifactConfig_Validate(t *testing.T) {
}
err := a.Validate()
if tc.expectedError != "" {
require.Error(t, err)
require.ErrorContains(t, err, tc.expectedError)
if tc.expErr != "" {
must.Error(t, err)
must.StrContains(t, err.Error(), tc.expErr)
} else {
require.NoError(t, err)
must.NoError(t, err)
}
})
}

View File

@@ -378,6 +378,10 @@ see the [drivers documentation](/docs/drivers).
S3 operation must complete before it is canceled. Set to `0` to not enforce a
limit.
- `disable_filesystem_isolation` `(bool: false)` - Specifies whether filesystem
isolation should be disabled for artifact downloads. Applies only to systems
where filesystem isolation via [landlock] is possible (Linux kernel 5.13+).
### `template` Parameters
- `function_denylist` `([]string: ["plugin", "writeToFile"])` - Specifies a
@@ -632,3 +636,4 @@ client {
[metadata_constraint]: /docs/job-specification/constraint#user-specified-metadata 'Nomad User-Specified Metadata Constraint Example'
[task working directory]: /docs/runtime/environment#task-directories 'Task directories'
[go-sockaddr/template]: https://godoc.org/github.com/hashicorp/go-sockaddr/template
[landlock]: https://docs.kernel.org/userspace-api/landlock.html

View File

@@ -57,6 +57,9 @@ USERPROFILE=<inherit $USERPROFILE>
Configuration of the artifact downloader should happen through the [`options`][artifact_params]
and [`headers`][artifact_params] fields of the `artifact` block.
The use of filesystem isolation can be disabled in Client configuration by
setting [`disable_filesystem_isolation`][fs_isolation].
## Nomad 1.4.0
#### Possible Panic During Upgrades
@@ -1577,4 +1580,4 @@ deleted and then Nomad 0.3.0 can be launched.
[gh_issue]: https://github.com/hashicorp/nomad/issues/new/choose
[upgrade process]: /docs/upgrade#upgrade-process
[landlock]: https://docs.kernel.org/userspace-api/landlock.html
[fs_isolation]: /docs/configuration/client#disable_filesystem_isolation