From 5989d5862a53c738b8fa09bd7a081f88f3f64d29 Mon Sep 17 00:00:00 2001 From: James Rasell Date: Fri, 25 Jul 2025 11:44:08 +0200 Subject: [PATCH 01/27] ci: Update golangci-lint to v2 and fix highlighted issues. (#26334) --- .golangci.yml | 123 ++++++++---------- GNUmakefile | 2 +- api/agent.go | 2 +- api/csi.go | 22 ++-- api/internal/testutil/discover/discover.go | 4 +- api/jobs.go | 2 +- api/nodes.go | 4 +- api/quota.go | 2 +- api/resources.go | 4 +- api/tasks.go | 20 +-- client/allocdir/alloc_dir.go | 120 ++++++++--------- client/allocdir/task_dir.go | 22 ++-- client/lib/cgroupslib/editor.go | 4 +- client/lib/nsutil/ns_linux.go | 5 +- client/lib/numalib/topology.go | 14 +- command/agent/config.go | 20 +-- command/agent/consul/catalog_testing.go | 2 +- command/agent/consul/service_client.go | 10 +- .../shared/executor/procstats/list_default.go | 2 +- e2e/v3/jobs3/jobs3.go | 2 +- e2e/v3/namespaces3/namespaces3.go | 8 +- helper/group/group.go | 4 +- nomad/job_endpoint_hook_consul_ce.go | 4 +- nomad/job_endpoint_hook_vault_ce.go | 2 +- nomad/lock/ttl.go | 2 +- nomad/operator_endpoint.go | 2 +- nomad/structs/config/sentinel.go | 4 +- nomad/structs/config/ui.go | 20 +-- nomad/structs/devices.go | 7 +- nomad/structs/diff.go | 10 +- nomad/structs/keyring.go | 2 +- nomad/structs/numa.go | 7 +- nomad/structs/variables.go | 4 +- nomad/worker.go | 6 +- plugins/base/base.go | 12 +- scheduler/feasible/device.go | 6 +- scheduler/reconciler/filters.go | 16 +-- scheduler/tests/testing.go | 6 +- testutil/tls.go | 6 +- testutil/wait.go | 4 +- version/version.go | 32 ++--- 41 files changed, 275 insertions(+), 275 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 81a4e92b8..276fda53b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,8 +1,8 @@ # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: BUSL-1.1 +version: "2" run: - # Timeout for analysis. 
timeout: 10m # Modules download mode (do not modify go.mod) @@ -11,82 +11,71 @@ run: # Exclude test files tests: false - # Skip ui and generated files - issues: - exclude-files: - - ".*\\.generated\\.go$" - - ".*bindata_assetfs\\.go$" - skip-dirs: - - ui - -# Output configuration options output: formats: - - format: colored-line-number + text: path: stdout - - # print lines of code with issue, default is true - print-issued-lines: true - - # print linter name in the end of issue text, default is true - print-linter-name: true - -# all available settings of specific linters -linters-settings: - errcheck: - # report about not checking of errors in type assetions: `a := b.(MyStruct)`; - # default is false: such cases aren't reported by default. - check-type-assertions: false - - exclude-functions: - - io.* - - fmt.* - - # path to a file containing a list of functions to exclude from checking - # see https://github.com/kisielk/errcheck#excluding-functions for details - # exclude: /path/to/file.txt - govet: - # report about shadowed variables - disable: - - shadow - gofmt: - # simplify code: gofmt with `-s` option, true by default - simplify: true - gocritic: - disabled-checks: - - commentFormatting - - deprecatedComment - staticcheck: - # I(jrasell) will work on enabling additional checks when possible. 
- checks: ["ST1020", "ST1016"] - -issues: - exclude: - - ifElseChain - - singleCaseSwitch - - assignOp - - unlambda + print-linter-name: true + print-issued-lines: true linters: - disable-all: true + default: none enable: - - goimports - - gocritic - - misspell - - govet - - ineffassign - - unconvert - - gofmt - - gosimple - - staticcheck - asasalint - asciicheck - bidichk - bodyclose + - copyloopvar - dogsled - durationcheck - # - errchkjson (todo) - # - errorlint (todo) - - copyloopvar + - gocritic + - govet + - ineffassign + - misspell + - staticcheck + - unconvert - usestdlibvars - fast: false + settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + exclude-functions: + - io.* + - fmt.* + gocritic: + disabled-checks: + - commentFormatting + - deprecatedComment + govet: + disable: + - shadow + staticcheck: + checks: + - ST1016 + - ST1020 + exclusions: + rules: + - path: (.+)\.go$ + text: ifElseChain + - path: (.+)\.go$ + text: singleCaseSwitch + - path: (.+)\.go$ + text: assignOp + - path: (.+)\.go$ + text: unlambda + paths: + - ".*\\.generated\\.go$" + - ".*bindata_assetfs\\.go$" + +formatters: + enable: + - gofmt + - goimports + settings: + gofmt: + simplify: true + exclusions: + paths: + - ".*\\.generated\\.go$" + - ".*bindata_assetfs\\.go$" diff --git a/GNUmakefile b/GNUmakefile index 4a4b83307..346d1069a 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -143,7 +143,7 @@ deps: ## Install build and development dependencies .PHONY: lint-deps lint-deps: ## Install linter dependencies @echo "==> Updating linter dependencies..." 
- go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.64.5 + go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.3.0 go install github.com/client9/misspell/cmd/misspell@v0.3.4 go install github.com/hashicorp/go-hclog/hclogvet@feaf6d2ec20fd895e711195c99e3fde93a68afc5 diff --git a/api/agent.go b/api/agent.go index 459a62d8d..295d159a0 100644 --- a/api/agent.go +++ b/api/agent.go @@ -156,7 +156,7 @@ func (a *Agent) Members() (*ServerMembers, error) { return resp, nil } -// Members is used to query all of the known server members +// MembersOpts is used to query all of the known server members // with the ability to set QueryOptions func (a *Agent) MembersOpts(opts *QueryOptions) (*ServerMembers, error) { var resp *ServerMembers diff --git a/api/csi.go b/api/csi.go index 492b011aa..517aac3bb 100644 --- a/api/csi.go +++ b/api/csi.go @@ -102,10 +102,11 @@ func (v *CSIVolumes) Create(vol *CSIVolume, w *WriteOptions) ([]*CSIVolume, *Wri return resp.Volumes, meta, err } -// DEPRECATED: will be removed in Nomad 1.4.0 // Delete deletes a CSI volume from an external storage provider. The ID // passed as an argument here is for the storage provider's ID, so a volume // that's already been deregistered can be deleted. +// +// Deprecated: will be removed in Nomad 1.4.0 func (v *CSIVolumes) Delete(externalVolID string, w *WriteOptions) error { _, err := v.client.delete(fmt.Sprintf("/v1/volume/csi/%v/delete", url.PathEscape(externalVolID)), nil, nil, w) return err @@ -184,8 +185,9 @@ func (v *CSIVolumes) ListSnapshotsOpts(req *CSISnapshotListRequest) (*CSISnapsho return resp, qm, nil } -// DEPRECATED: will be removed in Nomad 1.4.0 // ListSnapshots lists external storage volume snapshots. 
+// +// Deprecated: will be removed in Nomad 1.4.0 func (v *CSIVolumes) ListSnapshots(pluginID string, secrets string, q *QueryOptions) (*CSISnapshotListResponse, *QueryMeta, error) { var resp *CSISnapshotListResponse @@ -269,26 +271,26 @@ func (o *CSIMountOptions) Merge(p *CSIMountOptions) { // API or in Nomad's logs. type CSISecrets map[string]string -func (q *QueryOptions) SetHeadersFromCSISecrets(secrets CSISecrets) { +func (o *QueryOptions) SetHeadersFromCSISecrets(secrets CSISecrets) { pairs := []string{} for k, v := range secrets { pairs = append(pairs, fmt.Sprintf("%v=%v", k, v)) } - if q.Headers == nil { - q.Headers = map[string]string{} + if o.Headers == nil { + o.Headers = map[string]string{} } - q.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",") + o.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",") } -func (w *WriteOptions) SetHeadersFromCSISecrets(secrets CSISecrets) { +func (o *WriteOptions) SetHeadersFromCSISecrets(secrets CSISecrets) { pairs := []string{} for k, v := range secrets { pairs = append(pairs, fmt.Sprintf("%v=%v", k, v)) } - if w.Headers == nil { - w.Headers = map[string]string{} + if o.Headers == nil { + o.Headers = map[string]string{} } - w.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",") + o.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",") } // CSIVolume is used for serialization, see also nomad/structs/csi.go diff --git a/api/internal/testutil/discover/discover.go b/api/internal/testutil/discover/discover.go index 33f639d2b..da9e720f5 100644 --- a/api/internal/testutil/discover/discover.go +++ b/api/internal/testutil/discover/discover.go @@ -12,8 +12,8 @@ import ( "strings" ) -// Checks the current executable, then $GOPATH/bin, and finally the CWD, in that -// order. If it can't be found, an error is returned. +// NomadExecutable checks the current executable, then $GOPATH/bin, and finally +// the CWD, in that order. If it can't be found, an error is returned. 
func NomadExecutable() (string, error) { nomadExe := "nomad" if runtime.GOOS == "windows" { diff --git a/api/jobs.go b/api/jobs.go index 9d007d0f2..75a9cfa60 100644 --- a/api/jobs.go +++ b/api/jobs.go @@ -176,7 +176,7 @@ func (j *Jobs) List(q *QueryOptions) ([]*JobListStub, *QueryMeta, error) { return j.ListOptions(nil, q) } -// List is used to list all of the existing jobs. +// ListOptions is used to list all of the existing jobs. func (j *Jobs) ListOptions(opts *JobListOptions, q *QueryOptions) ([]*JobListStub, *QueryMeta, error) { var resp []*JobListStub diff --git a/api/nodes.go b/api/nodes.go index 9f3574d34..a779ee496 100644 --- a/api/nodes.go +++ b/api/nodes.go @@ -126,7 +126,7 @@ func (n *Nodes) UpdateDrain(nodeID string, spec *DrainSpec, markEligible bool, q return resp, err } -// UpdateDrainWithMeta is used to update the drain strategy for a given node. If +// UpdateDrainOpts is used to update the drain strategy for a given node. If // markEligible is true and the drain is being removed, the node will be marked // as having its scheduling being eligible func (n *Nodes) UpdateDrainOpts(nodeID string, opts *DrainOptions, q *WriteOptions) (*NodeDrainUpdateResponse, @@ -478,7 +478,7 @@ func (n *Nodes) GC(nodeID string, q *QueryOptions) error { return err } -// TODO Add tests +// GcAlloc - TODO Add tests func (n *Nodes) GcAlloc(allocID string, q *QueryOptions) error { path := fmt.Sprintf("/v1/client/allocation/%s/gc", allocID) _, err := n.client.query(path, nil, q) diff --git a/api/quota.go b/api/quota.go index 3423440d3..b761b4eb2 100644 --- a/api/quota.go +++ b/api/quota.go @@ -51,7 +51,7 @@ func (q *Quotas) ListUsage(qo *QueryOptions) ([]*QuotaUsage, *QueryMeta, error) return resp, qm, nil } -// PrefixList is used to do a PrefixList search over quota usages +// PrefixListUsage is used to do a PrefixList search over quota usages func (q *Quotas) PrefixListUsage(prefix string, qo *QueryOptions) ([]*QuotaUsage, *QueryMeta, error) { if qo == nil { qo = 
&QueryOptions{Prefix: prefix} diff --git a/api/resources.go b/api/resources.go index 35c3dc6e7..f45a46151 100644 --- a/api/resources.go +++ b/api/resources.go @@ -181,8 +181,10 @@ type NetworkResource struct { CNI *CNIConfig `hcl:"cni,block"` } +// Megabits should not be used. +// // COMPAT(0.13) -// XXX Deprecated. Please do not use. The method will be removed in Nomad +// Deprecated. Please do not use. The method will be removed in Nomad // 0.13 and is only being kept to allow any references to be removed before // then. func (n *NetworkResource) Megabits() int { diff --git a/api/tasks.go b/api/tasks.go index e6d77d830..57d868c87 100644 --- a/api/tasks.go +++ b/api/tasks.go @@ -310,14 +310,14 @@ func (r *ReschedulePolicy) Copy() *ReschedulePolicy { return nrp } -func (p *ReschedulePolicy) String() string { - if p == nil { +func (r *ReschedulePolicy) String() string { + if r == nil { return "" } - if *p.Unlimited { - return fmt.Sprintf("unlimited with %v delay, max_delay = %v", *p.DelayFunction, *p.MaxDelay) + if *r.Unlimited { + return fmt.Sprintf("unlimited with %v delay, max_delay = %v", *r.DelayFunction, *r.MaxDelay) } - return fmt.Sprintf("%v in %v with %v delay, max_delay = %v", *p.Attempts, *p.Interval, *p.DelayFunction, *p.MaxDelay) + return fmt.Sprintf("%v in %v with %v delay, max_delay = %v", *r.Attempts, *r.Interval, *r.DelayFunction, *r.MaxDelay) } // Spread is used to serialize task group allocation spread preferences @@ -664,7 +664,7 @@ func (g *TaskGroup) Constrain(c *Constraint) *TaskGroup { return g } -// AddMeta is used to add a meta k/v pair to a task group +// SetMeta is used to add a meta k/v pair to a task group func (g *TaskGroup) SetMeta(key, val string) *TaskGroup { if g.Meta == nil { g.Meta = make(map[string]string) @@ -697,7 +697,7 @@ func (g *TaskGroup) AddSpread(s *Spread) *TaskGroup { return g } -// AddSpread is used to add a new spread preference to a task group. +// ScalingPolicy is used to add a new scaling policy to a task group. 
func (g *TaskGroup) ScalingPolicy(sp *ScalingPolicy) *TaskGroup { g.Scaling = sp return g @@ -751,7 +751,7 @@ type TaskLifecycle struct { Sidecar bool `mapstructure:"sidecar" hcl:"sidecar,optional"` } -// Determine if lifecycle has user-input values +// Empty determines if lifecycle has user-input values func (l *TaskLifecycle) Empty() bool { return l == nil } @@ -1048,7 +1048,7 @@ func NewTask(name, driver string) *Task { } } -// Configure is used to configure a single k/v pair on +// SetConfig is used to configure a single k/v pair on // the task. func (t *Task) SetConfig(key string, val interface{}) *Task { if t.Config == nil { @@ -1073,7 +1073,7 @@ func (t *Task) Require(r *Resources) *Task { return t } -// Constraint adds a new constraints to a single task. +// Constrain adds a new constraints to a single task. func (t *Task) Constrain(c *Constraint) *Task { t.Constraints = append(t.Constraints, c) return t diff --git a/client/allocdir/alloc_dir.go b/client/allocdir/alloc_dir.go index 652484608..291ce6e5c 100644 --- a/client/allocdir/alloc_dir.go +++ b/client/allocdir/alloc_dir.go @@ -173,17 +173,17 @@ func NewAllocDir(logger hclog.Logger, clientAllocDir, clientMountsDir, allocID s } // NewTaskDir creates a new TaskDir and adds it to the AllocDirs TaskDirs map. -func (d *AllocDir) NewTaskDir(task *structs.Task) *TaskDir { - d.mu.Lock() - defer d.mu.Unlock() +func (a *AllocDir) NewTaskDir(task *structs.Task) *TaskDir { + a.mu.Lock() + defer a.mu.Unlock() secretsSize := 0 if task.Resources != nil { secretsSize = task.Resources.SecretsMB } - td := d.newTaskDir(task.Name, secretsSize) - d.TaskDirs[task.Name] = td + td := a.newTaskDir(task.Name, secretsSize) + a.TaskDirs[task.Name] = td return td } @@ -193,13 +193,13 @@ func (d *AllocDir) NewTaskDir(task *structs.Task) *TaskDir { // Since a valid tar may have been written even when an error occurs, a special // file "NOMAD-${ALLOC_ID}-ERROR.log" will be appended to the tar with the // error message as the contents. 
-func (d *AllocDir) Snapshot(w io.Writer) error { - d.mu.RLock() - defer d.mu.RUnlock() +func (a *AllocDir) Snapshot(w io.Writer) error { + a.mu.RLock() + defer a.mu.RUnlock() - allocDataDir := filepath.Join(d.SharedDir, SharedDataDir) + allocDataDir := filepath.Join(a.SharedDir, SharedDataDir) rootPaths := []string{allocDataDir} - for _, taskdir := range d.TaskDirs { + for _, taskdir := range a.TaskDirs { rootPaths = append(rootPaths, taskdir.LocalDir) } @@ -213,7 +213,7 @@ func (d *AllocDir) Snapshot(w io.Writer) error { // Include the path of the file name relative to the alloc dir // so that we can put the files in the right directories - relPath, err := filepath.Rel(d.AllocDir, path) + relPath, err := filepath.Rel(a.AllocDir, path) if err != nil { return err } @@ -256,14 +256,14 @@ func (d *AllocDir) Snapshot(w io.Writer) error { // directories in the archive for _, path := range rootPaths { if err := filepath.Walk(path, walkFn); err != nil { - allocID := filepath.Base(d.AllocDir) + allocID := filepath.Base(a.AllocDir) if writeErr := writeError(tw, allocID, err); writeErr != nil { // This could be bad; other side won't know // snapshotting failed. It could also just mean // the snapshotting side closed the connect // prematurely and won't try to use the tar // anyway. - d.logger.Warn("snapshotting failed and unable to write error marker", "error", writeErr) + a.logger.Warn("snapshotting failed and unable to write error marker", "error", writeErr) } return fmt.Errorf("failed to snapshot %s: %w", path, err) } @@ -273,20 +273,20 @@ func (d *AllocDir) Snapshot(w io.Writer) error { } // Move other alloc directory's shared path and local dir to this alloc dir. 
-func (d *AllocDir) Move(other Interface, tasks []*structs.Task) error { - d.mu.RLock() - if !d.built { +func (a *AllocDir) Move(other Interface, tasks []*structs.Task) error { + a.mu.RLock() + if !a.built { // Enforce the invariant that Build is called before Move - d.mu.RUnlock() - return fmt.Errorf("unable to move to %q - alloc dir is not built", d.AllocDir) + a.mu.RUnlock() + return fmt.Errorf("unable to move to %q - alloc dir is not built", a.AllocDir) } // Moving is slow and only reads immutable fields, so unlock during heavy IO - d.mu.RUnlock() + a.mu.RUnlock() // Move the data directory otherDataDir := filepath.Join(other.ShareDirPath(), SharedDataDir) - dataDir := filepath.Join(d.SharedDir, SharedDataDir) + dataDir := filepath.Join(a.SharedDir, SharedDataDir) if fileInfo, err := os.Stat(otherDataDir); fileInfo != nil && err == nil { os.Remove(dataDir) // remove an empty data dir if it exists if err := os.Rename(otherDataDir, dataDir); err != nil { @@ -302,7 +302,7 @@ func (d *AllocDir) Move(other Interface, tasks []*structs.Task) error { fileInfo, err := os.Stat(otherTaskLocal) if fileInfo != nil && err == nil { // TaskDirs haven't been built yet, so create it - newTaskDir := filepath.Join(d.AllocDir, task.Name) + newTaskDir := filepath.Join(a.AllocDir, task.Name) if err := os.MkdirAll(newTaskDir, fileMode777); err != nil { return fmt.Errorf("error creating task %q dir: %w", task.Name, err) } @@ -318,31 +318,31 @@ func (d *AllocDir) Move(other Interface, tasks []*structs.Task) error { } // Destroy tears down previously build directory structure. -func (d *AllocDir) Destroy() error { +func (a *AllocDir) Destroy() error { // Unmount all mounted shared alloc dirs. 
mErr := new(multierror.Error) - if err := d.UnmountAll(); err != nil { + if err := a.UnmountAll(); err != nil { mErr = multierror.Append(mErr, err) } - if err := os.RemoveAll(d.AllocDir); err != nil { - mErr = multierror.Append(mErr, fmt.Errorf("failed to remove alloc dir %q: %w", d.AllocDir, err)) + if err := os.RemoveAll(a.AllocDir); err != nil { + mErr = multierror.Append(mErr, fmt.Errorf("failed to remove alloc dir %q: %w", a.AllocDir, err)) } // Unset built since the alloc dir has been destroyed. - d.mu.Lock() - d.built = false - d.mu.Unlock() + a.mu.Lock() + a.built = false + a.mu.Unlock() return mErr.ErrorOrNil() } // UnmountAll linked/mounted directories in task dirs. -func (d *AllocDir) UnmountAll() error { - d.mu.RLock() - defer d.mu.RUnlock() +func (a *AllocDir) UnmountAll() error { + a.mu.RLock() + defer a.mu.RUnlock() mErr := new(multierror.Error) - for _, dir := range d.TaskDirs { + for _, dir := range a.TaskDirs { if err := dir.Unmount(); err != nil { mErr = multierror.Append(mErr, err) } @@ -352,41 +352,41 @@ func (d *AllocDir) UnmountAll() error { } // Build the directory tree for an allocation. -func (d *AllocDir) Build() error { +func (a *AllocDir) Build() error { // Make the alloc directory, owned by the nomad process. - if err := os.MkdirAll(d.AllocDir, fileMode755); err != nil { - return fmt.Errorf("Failed to make the alloc directory %v: %w", d.AllocDir, err) + if err := os.MkdirAll(a.AllocDir, fileMode755); err != nil { + return fmt.Errorf("Failed to make the alloc directory %v: %w", a.AllocDir, err) } // Make the shared directory and make it available to all user/groups. 
- if err := allocMkdirAll(d.SharedDir, fileMode755); err != nil { + if err := allocMkdirAll(a.SharedDir, fileMode755); err != nil { return err } // Create shared subdirs for _, dir := range SharedAllocDirs { - p := filepath.Join(d.SharedDir, dir) + p := filepath.Join(a.SharedDir, dir) if err := allocMkdirAll(p, fileMode777); err != nil { return err } } // Mark as built - d.mu.Lock() - d.built = true - d.mu.Unlock() + a.mu.Lock() + a.built = true + a.mu.Unlock() return nil } // List returns the list of files at a path relative to the alloc dir -func (d *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) { - if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil { +func (a *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) { + if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil { return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") } - p := filepath.Join(d.AllocDir, path) + p := filepath.Join(a.AllocDir, path) finfos, err := os.ReadDir(p) if err != nil { return []*cstructs.AllocFileInfo{}, err @@ -409,14 +409,14 @@ func (d *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) { } // Stat returns information about the file at a path relative to the alloc dir -func (d *AllocDir) Stat(path string) (*cstructs.AllocFileInfo, error) { - if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil { +func (a *AllocDir) Stat(path string) (*cstructs.AllocFileInfo, error) { + if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil { return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") } - p := filepath.Join(d.AllocDir, path) + p := filepath.Join(a.AllocDir, path) info, err := os.Stat(p) if err != nil { return nil, err 
@@ -459,28 +459,28 @@ func detectContentType(fileInfo os.FileInfo, path string) string { } // ReadAt returns a reader for a file at the path relative to the alloc dir -func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) { - if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil { +func (a *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) { + if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil { return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") } - p := filepath.Join(d.AllocDir, path) + p := filepath.Join(a.AllocDir, path) // Check if it is trying to read into a secret directory - d.mu.RLock() - for _, dir := range d.TaskDirs { + a.mu.RLock() + for _, dir := range a.TaskDirs { if caseInsensitiveHasPrefix(p, dir.SecretsDir) { - d.mu.RUnlock() + a.mu.RUnlock() return nil, fmt.Errorf("Reading secret file prohibited: %s", path) } if caseInsensitiveHasPrefix(p, dir.PrivateDir) { - d.mu.RUnlock() + a.mu.RUnlock() return nil, fmt.Errorf("Reading private file prohibited: %s", path) } } - d.mu.RUnlock() + a.mu.RUnlock() f, err := os.Open(p) if err != nil { @@ -499,15 +499,15 @@ func caseInsensitiveHasPrefix(s, prefix string) bool { // BlockUntilExists blocks until the passed file relative the allocation // directory exists. The block can be cancelled with the passed context. 
-func (d *AllocDir) BlockUntilExists(ctx context.Context, path string) (chan error, error) { - if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil { +func (a *AllocDir) BlockUntilExists(ctx context.Context, path string) (chan error, error) { + if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil { return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") } // Get the path relative to the alloc directory - p := filepath.Join(d.AllocDir, path) + p := filepath.Join(a.AllocDir, path) watcher := getFileWatcher(p) returnCh := make(chan error, 1) t := &tomb.Tomb{} @@ -525,8 +525,8 @@ func (d *AllocDir) BlockUntilExists(ctx context.Context, path string) (chan erro // ChangeEvents watches for changes to the passed path relative to the // allocation directory. The offset should be the last read offset. The context is // used to clean up the watch. 
-func (d *AllocDir) ChangeEvents(ctx context.Context, path string, curOffset int64) (*watch.FileChanges, error) { - if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil { +func (a *AllocDir) ChangeEvents(ctx context.Context, path string, curOffset int64) (*watch.FileChanges, error) { + if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil { return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") @@ -539,7 +539,7 @@ func (d *AllocDir) ChangeEvents(ctx context.Context, path string, curOffset int6 }() // Get the path relative to the alloc directory - p := filepath.Join(d.AllocDir, path) + p := filepath.Join(a.AllocDir, path) watcher := getFileWatcher(p) return watcher.ChangeEvents(t, curOffset) } diff --git a/client/allocdir/task_dir.go b/client/allocdir/task_dir.go index 143c6e753..1ba2c7f8a 100644 --- a/client/allocdir/task_dir.go +++ b/client/allocdir/task_dir.go @@ -98,28 +98,28 @@ type TaskDir struct { // create paths on disk. 
// // Call AllocDir.NewTaskDir to create new TaskDirs -func (d *AllocDir) newTaskDir(taskName string, secretsInMB int) *TaskDir { - taskDir := filepath.Join(d.AllocDir, taskName) - taskUnique := filepath.Base(d.AllocDir) + "-" + taskName +func (a *AllocDir) newTaskDir(taskName string, secretsInMB int) *TaskDir { + taskDir := filepath.Join(a.AllocDir, taskName) + taskUnique := filepath.Base(a.AllocDir) + "-" + taskName if secretsInMB == 0 { secretsInMB = defaultSecretDirTmpfsSize } return &TaskDir{ - AllocDir: d.AllocDir, + AllocDir: a.AllocDir, Dir: taskDir, - SharedAllocDir: filepath.Join(d.AllocDir, SharedAllocName), - LogDir: filepath.Join(d.AllocDir, SharedAllocName, LogDirName), + SharedAllocDir: filepath.Join(a.AllocDir, SharedAllocName), + LogDir: filepath.Join(a.AllocDir, SharedAllocName, LogDirName), SharedTaskDir: filepath.Join(taskDir, SharedAllocName), LocalDir: filepath.Join(taskDir, TaskLocal), SecretsDir: filepath.Join(taskDir, TaskSecrets), PrivateDir: filepath.Join(taskDir, TaskPrivate), - MountsAllocDir: filepath.Join(d.clientAllocMountsDir, taskUnique, "alloc"), - MountsTaskDir: filepath.Join(d.clientAllocMountsDir, taskUnique), - MountsSecretsDir: filepath.Join(d.clientAllocMountsDir, taskUnique, "secrets"), - skip: set.From[string]([]string{d.clientAllocDir, d.clientAllocMountsDir}), - logger: d.logger.Named("task_dir").With("task_name", taskName), + MountsAllocDir: filepath.Join(a.clientAllocMountsDir, taskUnique, "alloc"), + MountsTaskDir: filepath.Join(a.clientAllocMountsDir, taskUnique), + MountsSecretsDir: filepath.Join(a.clientAllocMountsDir, taskUnique, "secrets"), + skip: set.From[string]([]string{a.clientAllocDir, a.clientAllocMountsDir}), + logger: a.logger.Named("task_dir").With("task_name", taskName), secretsInMB: secretsInMB, } } diff --git a/client/lib/cgroupslib/editor.go b/client/lib/cgroupslib/editor.go index 13519424b..8dc0ea0ef 100644 --- a/client/lib/cgroupslib/editor.go +++ b/client/lib/cgroupslib/editor.go @@ -88,7 +88,7 
@@ func (e *editor) Write(filename, content string) error { return os.WriteFile(path, []byte(content), 0644) } -// A Factory creates a Lifecycle which is an abstraction over the setup and +// Factory creates a Lifecycle which is an abstraction over the setup and // teardown routines used for creating and destroying cgroups used for // constraining Nomad tasks. func Factory(allocID, task string, cores bool) Lifecycle { @@ -106,7 +106,7 @@ func Factory(allocID, task string, cores bool) Lifecycle { } } -// A Lifecycle manages the lifecycle of the cgroup(s) of a task from the +// Lifecycle manages the lifecycle of the cgroup(s) of a task from the // perspective of the Nomad client. That is, it creates and deletes the cgroups // for a task, as well as provides last effort kill semantics for ensuring a // process cannot stay alive beyond the intent of the client. diff --git a/client/lib/nsutil/ns_linux.go b/client/lib/nsutil/ns_linux.go index a48cae205..edc65ab23 100644 --- a/client/lib/nsutil/ns_linux.go +++ b/client/lib/nsutil/ns_linux.go @@ -27,7 +27,8 @@ import ( "golang.org/x/sys/unix" ) -// Returns an object representing the current OS thread's network namespace +// GetCurrentNS returns an object representing the current OS thread's network +// namespace func GetCurrentNS() (NetNS, error) { // Lock the thread in case other goroutine executes in it and changes its // network namespace after getCurrentThreadNetNSPath(), otherwise it might @@ -140,7 +141,7 @@ func IsNSorErr(nspath string) error { } } -// Returns an object representing the namespace referred to by @path +// GetNS returns an object representing the namespace referred to by @path func GetNS(nspath string) (NetNS, error) { err := IsNSorErr(nspath) if err != nil { diff --git a/client/lib/numalib/topology.go b/client/lib/numalib/topology.go index 1d1147c8b..118a89eea 100644 --- a/client/lib/numalib/topology.go +++ b/client/lib/numalib/topology.go @@ -87,18 +87,18 @@ type Topology struct { 
OverrideWitholdCompute hw.MHz } -func (t *Topology) SetNodes(nodes *idset.Set[hw.NodeID]) { - t.nodeIDs = nodes +func (st *Topology) SetNodes(nodes *idset.Set[hw.NodeID]) { + st.nodeIDs = nodes if !nodes.Empty() { - t.Nodes = nodes.Slice() + st.Nodes = nodes.Slice() } else { - t.Nodes = []uint8{} + st.Nodes = []uint8{} } } -func (t *Topology) SetNodesFrom(nodes []uint8) { - t.nodeIDs = idset.From[hw.NodeID](nodes) - t.Nodes = nodes +func (st *Topology) SetNodesFrom(nodes []uint8) { + st.nodeIDs = idset.From[hw.NodeID](nodes) + st.Nodes = nodes } // A Core represents one logical (vCPU) core on a processor. Basically the slice diff --git a/command/agent/config.go b/command/agent/config.go index dfe49aabe..e6311a6af 100644 --- a/command/agent/config.go +++ b/command/agent/config.go @@ -2473,8 +2473,8 @@ func (s *ServerConfig) Merge(b *ServerConfig) *ServerConfig { } // Merge is used to merge two client configs together -func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig { - result := *a +func (c *ClientConfig) Merge(b *ClientConfig) *ClientConfig { + result := *c if b.Enabled { result.Enabled = true @@ -2615,10 +2615,10 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig { result.ServerJoin = result.ServerJoin.Merge(b.ServerJoin) } - if len(a.HostVolumes) == 0 && len(b.HostVolumes) != 0 { + if len(c.HostVolumes) == 0 && len(b.HostVolumes) != 0 { result.HostVolumes = structs.CopySliceClientHostVolumeConfig(b.HostVolumes) } else if len(b.HostVolumes) != 0 { - result.HostVolumes = structs.HostVolumeSliceMerge(a.HostVolumes, b.HostVolumes) + result.HostVolumes = structs.HostVolumeSliceMerge(c.HostVolumes, b.HostVolumes) } if b.CNIPath != "" { @@ -2640,7 +2640,7 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig { result.BridgeNetworkHairpinMode = true } - result.HostNetworks = a.HostNetworks + result.HostNetworks = c.HostNetworks if len(b.HostNetworks) != 0 { result.HostNetworks = append(result.HostNetworks, b.HostNetworks...) 
@@ -2660,9 +2660,9 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig { result.CgroupParent = b.CgroupParent } - result.Artifact = a.Artifact.Merge(b.Artifact) - result.Drain = a.Drain.Merge(b.Drain) - result.Users = a.Users.Merge(b.Users) + result.Artifact = c.Artifact.Merge(b.Artifact) + result.Drain = c.Drain.Merge(b.Drain) + result.Users = c.Users.Merge(b.Users) if b.NodeMaxAllocs != 0 { result.NodeMaxAllocs = b.NodeMaxAllocs @@ -2789,8 +2789,8 @@ func (t *Telemetry) Merge(b *Telemetry) *Telemetry { } // Merge is used to merge two port configurations. -func (a *Ports) Merge(b *Ports) *Ports { - result := *a +func (p *Ports) Merge(b *Ports) *Ports { + result := *p if b.HTTP != 0 { result.HTTP = b.HTTP diff --git a/command/agent/consul/catalog_testing.go b/command/agent/consul/catalog_testing.go index 61923f6fb..4f86eec6e 100644 --- a/command/agent/consul/catalog_testing.go +++ b/command/agent/consul/catalog_testing.go @@ -272,7 +272,7 @@ func (c *MockAgent) CheckRegs() []*api.AgentCheckRegistration { return regs } -// CheckRegister implements AgentAPI +// CheckRegisterOpts implements AgentAPI func (c *MockAgent) CheckRegisterOpts(check *api.AgentCheckRegistration, _ *api.QueryOptions) error { c.mu.Lock() defer c.mu.Unlock() diff --git a/command/agent/consul/service_client.go b/command/agent/consul/service_client.go index 762a9a2ff..52e32fa8f 100644 --- a/command/agent/consul/service_client.go +++ b/command/agent/consul/service_client.go @@ -168,7 +168,7 @@ type ACLsAPI interface { // sidecar - Consul's view (agent, not catalog) of the service definition of the sidecar // associated with existing that may or may not exist. // May be nil. 
-func (s *ServiceClient) agentServiceUpdateRequired(reason syncReason, wanted *api.AgentServiceRegistration, existing *api.AgentService, sidecar *api.AgentService) bool { +func (c *ServiceClient) agentServiceUpdateRequired(reason syncReason, wanted *api.AgentServiceRegistration, existing *api.AgentService, sidecar *api.AgentService) bool { switch reason { case syncPeriodic: // In a periodic sync with Consul, we need to respect the value of @@ -188,7 +188,7 @@ func (s *ServiceClient) agentServiceUpdateRequired(reason syncReason, wanted *ap maybeTweakTaggedAddresses(wanted, existing) // Okay now it is safe to compare. - return s.different(wanted, existing, sidecar) + return c.different(wanted, existing, sidecar) default: // A non-periodic sync with Consul indicates an operation has been set @@ -200,7 +200,7 @@ func (s *ServiceClient) agentServiceUpdateRequired(reason syncReason, wanted *ap maybeTweakTaggedAddresses(wanted, existing) // Okay now it is safe to compare. - return s.different(wanted, existing, sidecar) + return c.different(wanted, existing, sidecar) } } @@ -245,9 +245,9 @@ func maybeTweakTaggedAddresses(wanted *api.AgentServiceRegistration, existing *a // different compares the wanted state of the service registration with the actual // (cached) state of the service registration reported by Consul. If any of the // critical fields are not deeply equal, they considered different. 
-func (s *ServiceClient) different(wanted *api.AgentServiceRegistration, existing *api.AgentService, sidecar *api.AgentService) bool { +func (c *ServiceClient) different(wanted *api.AgentServiceRegistration, existing *api.AgentService, sidecar *api.AgentService) bool { trace := func(field string, left, right any) { - s.logger.Trace("registrations different", "id", wanted.ID, + c.logger.Trace("registrations different", "id", wanted.ID, "field", field, "wanted", fmt.Sprintf("%#v", left), "existing", fmt.Sprintf("%#v", right), ) } diff --git a/drivers/shared/executor/procstats/list_default.go b/drivers/shared/executor/procstats/list_default.go index 0efd62bf0..882434e6a 100644 --- a/drivers/shared/executor/procstats/list_default.go +++ b/drivers/shared/executor/procstats/list_default.go @@ -14,7 +14,7 @@ import ( "github.com/shirou/gopsutil/v3/process" ) -// List the process tree starting at the given executorPID +// ListByPid the process tree starting at the given executorPID func ListByPid(executorPID int) set.Collection[ProcessID] { result := set.New[ProcessID](10) diff --git a/e2e/v3/jobs3/jobs3.go b/e2e/v3/jobs3/jobs3.go index 14aa01562..5088deaa3 100644 --- a/e2e/v3/jobs3/jobs3.go +++ b/e2e/v3/jobs3/jobs3.go @@ -579,7 +579,7 @@ func Verbose(on bool) Option { } } -// Set an HCL variable. +// Var sets a HCL variable. 
func Var(key, value string) Option { return func(sub *Submission) { sub.vars[key] = value diff --git a/e2e/v3/namespaces3/namespaces3.go b/e2e/v3/namespaces3/namespaces3.go index 81b2a267d..95b04f4b7 100644 --- a/e2e/v3/namespaces3/namespaces3.go +++ b/e2e/v3/namespaces3/namespaces3.go @@ -65,10 +65,10 @@ func (ns *Namespace) String() string { return ns.Name } -func (n *Names) setClient() { +func (g *Names) setClient() { nomadClient, nomadErr := nomadapi.NewClient(nomadapi.DefaultConfig()) - must.NoError(n.t, nomadErr, must.Sprint("failed to create nomad api client")) - n.nomadClient = nomadClient + must.NoError(g.t, nomadErr, must.Sprint("failed to create nomad api client")) + g.nomadClient = nomadClient } func configure(t *testing.T, opts ...Option) Cleanup { @@ -117,7 +117,7 @@ func Create(t *testing.T, name string, opts ...Option) Cleanup { return configure(t, append(opts, opt)...) } -// Create namespaces of the given names. +// CreateN namespaces of the given names. func CreateN(t *testing.T, names []string, opts ...Option) Cleanup { creations := helper.ConvertSlice(names, func(name string) Option { namespace := &Namespace{Name: name} diff --git a/helper/group/group.go b/helper/group/group.go index 8803aa312..1b93f029d 100644 --- a/helper/group/group.go +++ b/helper/group/group.go @@ -35,8 +35,8 @@ func (g *Group) Wait() { g.wg.Wait() } -// Wait for all goroutines to exit, or for the context to finish. -// Must be called after all calls to Go complete. +// WaitWithContext waits for all goroutines to exit, or for the context to +// finish. Must be called after all calls to Go complete. 
func (g *Group) WaitWithContext(ctx context.Context) { doneCh := make(chan struct{}) go func() { diff --git a/nomad/job_endpoint_hook_consul_ce.go b/nomad/job_endpoint_hook_consul_ce.go index 304ef7320..cfdee1847 100644 --- a/nomad/job_endpoint_hook_consul_ce.go +++ b/nomad/job_endpoint_hook_consul_ce.go @@ -67,6 +67,6 @@ func (h jobConsulHook) validateCluster(name string) error { // Mutate ensures that the job's Consul cluster has been configured to be the // default Consul cluster if unset -func (j jobConsulHook) Mutate(job *structs.Job) (*structs.Job, []error, error) { - return j.mutateImpl(job, structs.ConsulDefaultCluster), nil, nil +func (h jobConsulHook) Mutate(job *structs.Job) (*structs.Job, []error, error) { + return h.mutateImpl(job, structs.ConsulDefaultCluster), nil, nil } diff --git a/nomad/job_endpoint_hook_vault_ce.go b/nomad/job_endpoint_hook_vault_ce.go index d4949323f..c1a3da0e8 100644 --- a/nomad/job_endpoint_hook_vault_ce.go +++ b/nomad/job_endpoint_hook_vault_ce.go @@ -35,7 +35,7 @@ func (h jobVaultHook) validateClustersForNamespace(_ *structs.Job, blocks map[st return nil } -func (j jobVaultHook) Mutate(job *structs.Job) (*structs.Job, []error, error) { +func (h jobVaultHook) Mutate(job *structs.Job) (*structs.Job, []error, error) { for _, tg := range job.TaskGroups { for _, task := range tg.Tasks { if task.Vault == nil || task.Vault.Cluster != "" { diff --git a/nomad/lock/ttl.go b/nomad/lock/ttl.go index 5989a33fa..11d02f669 100644 --- a/nomad/lock/ttl.go +++ b/nomad/lock/ttl.go @@ -98,7 +98,7 @@ func (t *TTLTimer) EmitMetrics(period time.Duration, shutdownCh chan struct{}) { } } -// timerNum returns the number of registered timers. +// TimerNum returns the number of registered timers. 
func (t *TTLTimer) TimerNum() int { t.lock.RLock() defer t.lock.RUnlock() diff --git a/nomad/operator_endpoint.go b/nomad/operator_endpoint.go index 0315116e4..e24d13e58 100644 --- a/nomad/operator_endpoint.go +++ b/nomad/operator_endpoint.go @@ -104,7 +104,7 @@ func (op *Operator) RaftGetConfiguration(args *structs.GenericRequest, reply *st return nil } -// COMPAT(1.12.0): RaftRemovePeerByAddress was used to support Raft Protocol v2, +// RaftRemovePeerByAddress COMPAT(1.12.0) was used to support Raft Protocol v2, // which was removed in Nomad 1.4.0 but the API was not removed. Remove this RPC // entirely in Nomad 1.12.0. func (op *Operator) RaftRemovePeerByAddress(_ *structs.RaftPeerByAddressRequest, _ *struct{}) error { diff --git a/nomad/structs/config/sentinel.go b/nomad/structs/config/sentinel.go index 9553955c7..c5b6aa81c 100644 --- a/nomad/structs/config/sentinel.go +++ b/nomad/structs/config/sentinel.go @@ -43,8 +43,8 @@ func (s *SentinelImport) Copy() *SentinelImport { } // Merge is used to merge two Sentinel configs together. The settings from the input always take precedence. -func (a *SentinelConfig) Merge(b *SentinelConfig) *SentinelConfig { - result := *a +func (s *SentinelConfig) Merge(b *SentinelConfig) *SentinelConfig { + result := *s if len(b.Imports) > 0 { result.Imports = append(result.Imports, b.Imports...) } diff --git a/nomad/structs/config/ui.go b/nomad/structs/config/ui.go index 0cd547e56..c4cb72791 100644 --- a/nomad/structs/config/ui.go +++ b/nomad/structs/config/ui.go @@ -48,20 +48,20 @@ type ContentSecurityPolicy struct { } // Copy returns a copy of this Vault UI config. 
-func (old *ContentSecurityPolicy) Copy() *ContentSecurityPolicy { - if old == nil { +func (csp *ContentSecurityPolicy) Copy() *ContentSecurityPolicy { + if csp == nil { return nil } nc := new(ContentSecurityPolicy) - *nc = *old - nc.ConnectSrc = slices.Clone(old.ConnectSrc) - nc.DefaultSrc = slices.Clone(old.DefaultSrc) - nc.FormAction = slices.Clone(old.FormAction) - nc.FrameAncestors = slices.Clone(old.FrameAncestors) - nc.ImgSrc = slices.Clone(old.ImgSrc) - nc.ScriptSrc = slices.Clone(old.ScriptSrc) - nc.StyleSrc = slices.Clone(old.StyleSrc) + *nc = *csp + nc.ConnectSrc = slices.Clone(csp.ConnectSrc) + nc.DefaultSrc = slices.Clone(csp.DefaultSrc) + nc.FormAction = slices.Clone(csp.FormAction) + nc.FrameAncestors = slices.Clone(csp.FrameAncestors) + nc.ImgSrc = slices.Clone(csp.ImgSrc) + nc.ScriptSrc = slices.Clone(csp.ScriptSrc) + nc.StyleSrc = slices.Clone(csp.StyleSrc) return nc } diff --git a/nomad/structs/devices.go b/nomad/structs/devices.go index 9ed3438db..5bdec46ea 100644 --- a/nomad/structs/devices.go +++ b/nomad/structs/devices.go @@ -24,7 +24,8 @@ type DeviceAccounterInstance struct { Instances map[string]int } -// Locality returns the NodeDeviceLocality of the instance of the specific deviceID. +// GetLocality returns the NodeDeviceLocality of the instance of the specific +// deviceID. // // If no instance matching the deviceID is found, nil is returned. 
func (dai *DeviceAccounterInstance) GetLocality(instanceID string) *NodeDeviceLocality { @@ -163,9 +164,9 @@ func (d *DeviceAccounter) AddReserved(res *AllocatedDeviceResource) (collision b } // FreeCount returns the number of free device instances -func (i *DeviceAccounterInstance) FreeCount() int { +func (dai *DeviceAccounterInstance) FreeCount() int { count := 0 - for _, c := range i.Instances { + for _, c := range dai.Instances { if c == 0 { count++ } diff --git a/nomad/structs/diff.go b/nomad/structs/diff.go index 4f5116afe..bb1755436 100644 --- a/nomad/structs/diff.go +++ b/nomad/structs/diff.go @@ -2827,23 +2827,23 @@ func portDiffs(old, new []Port, dynamic bool, contextual bool) []*ObjectDiff { } -func (r *NUMA) Diff(other *NUMA, contextual bool) *ObjectDiff { - if r.Equal(other) { +func (n *NUMA) Diff(other *NUMA, contextual bool) *ObjectDiff { + if n.Equal(other) { return nil } diff := &ObjectDiff{Type: DiffTypeNone, Name: "NUMA"} var oldPrimitiveFlat, newPrimitiveFlat map[string]string - if r == nil { + if n == nil { diff.Type = DiffTypeAdded newPrimitiveFlat = flatmap.Flatten(other, nil, true) } else if other == nil { diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(r, nil, true) + oldPrimitiveFlat = flatmap.Flatten(n, nil, true) } else { diff.Type = DiffTypeEdited - oldPrimitiveFlat = flatmap.Flatten(r, nil, true) + oldPrimitiveFlat = flatmap.Flatten(n, nil, true) newPrimitiveFlat = flatmap.Flatten(other, nil, true) } diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) diff --git a/nomad/structs/keyring.go b/nomad/structs/keyring.go index 5dc85a05a..363f9bd70 100644 --- a/nomad/structs/keyring.go +++ b/nomad/structs/keyring.go @@ -88,7 +88,7 @@ func (k *UnwrappedRootKey) Copy() *UnwrappedRootKey { } } -// MakeInactive returns a copy of the RootKey with the meta state set to active +// MakeActive returns a copy of the RootKey with the meta state set to active func (k *UnwrappedRootKey) MakeActive() *UnwrappedRootKey 
{ meta := k.Meta.Copy() meta.State = RootKeyStateActive diff --git a/nomad/structs/numa.go b/nomad/structs/numa.go index 8b79282ec..1b4d24ac7 100644 --- a/nomad/structs/numa.go +++ b/nomad/structs/numa.go @@ -118,7 +118,7 @@ func (r LegacyNodeCpuResources) empty() bool { return r.CpuShares == 0 || r.TotalCpuCores == 0 } -// NomadProcessorResources captures the CPU hardware resources of the Nomad node. +// NodeProcessorResources captures the CPU hardware resources of the Nomad node. // // In Nomad enterprise this structure is used to map tasks to NUMA nodes. type NodeProcessorResources struct { @@ -126,8 +126,9 @@ type NodeProcessorResources struct { Topology *numalib.Topology // do not modify } -// partial struct serialization / copy / merge sadness means this struct can -// exist with no data, which is a condition we must detect during the upgrade path +// Empty handles partial struct serialization / copy / merge sadness that means +// this struct can exist with no data, which is a condition we must detect +// during the upgrade path. func (r NodeProcessorResources) Empty() bool { return r.Topology == nil || len(r.Topology.Cores) == 0 } diff --git a/nomad/structs/variables.go b/nomad/structs/variables.go index a42d6a4f1..ff45118cf 100644 --- a/nomad/structs/variables.go +++ b/nomad/structs/variables.go @@ -391,8 +391,8 @@ func (vd VariableDecrypted) Validate() error { return nil } -// A new variable can be crated just to support a lock, it doesn't require to hold -// any items and it will validate the lock. +// ValidateForLock ensures a new variable can be created just to support a lock, +// it doesn't require to hold any items and it will validate the lock. 
func (vd VariableDecrypted) ValidateForLock() error { var mErr multierror.Error if vd.Namespace == AllNamespacesSentinel { diff --git a/nomad/worker.go b/nomad/worker.go index 29436e376..49b64536d 100644 --- a/nomad/worker.go +++ b/nomad/worker.go @@ -182,7 +182,7 @@ func (w *Worker) Resume() { } } -// Resume transitions a worker to the stopping state. Check +// Stop transitions a worker to the stopping state. Check // to see if the worker stopped by calling IsStopped() func (w *Worker) Stop() { w.setStatus(WorkerStopping) @@ -250,7 +250,7 @@ func (w *Worker) setWorkerStatusLocked(newStatus WorkerStatus) { w.status = newStatus } -// GetStatus returns the status of the Worker's Workload. +// GetWorkloadStatus returns the status of the Worker's Workload. func (w *Worker) GetWorkloadStatus() SchedulerWorkerStatus { w.statusLock.RLock() defer w.statusLock.RUnlock() @@ -578,7 +578,7 @@ type ErrMinIndexDeadlineExceeded struct { timeout time.Duration } -// Unwrapping an ErrMinIndexDeadlineExceeded always return +// Unwrap an ErrMinIndexDeadlineExceeded that always returns // context.DeadlineExceeded func (ErrMinIndexDeadlineExceeded) Unwrap() error { return context.DeadlineExceeded diff --git a/plugins/base/base.go b/plugins/base/base.go index 9e508d7c9..f6c873315 100644 --- a/plugins/base/base.go +++ b/plugins/base/base.go @@ -84,16 +84,16 @@ type ClientDriverConfig struct { Topology *numalib.Topology } -func (c *AgentConfig) toProto() *proto.NomadConfig { - if c == nil { +func (ac *AgentConfig) toProto() *proto.NomadConfig { + if ac == nil { return nil } cfg := &proto.NomadConfig{} - if c.Driver != nil { + if ac.Driver != nil { cfg.Driver = &proto.NomadDriverConfig{ - ClientMaxPort: uint32(c.Driver.ClientMaxPort), - ClientMinPort: uint32(c.Driver.ClientMinPort), - Topology: nomadTopologyToProto(c.Driver.Topology), + ClientMaxPort: uint32(ac.Driver.ClientMaxPort), + ClientMinPort: uint32(ac.Driver.ClientMinPort), + Topology: nomadTopologyToProto(ac.Driver.Topology), } } 
return cfg diff --git a/scheduler/feasible/device.go b/scheduler/feasible/device.go index 4bb2e72f6..4f34b0395 100644 --- a/scheduler/feasible/device.go +++ b/scheduler/feasible/device.go @@ -33,9 +33,9 @@ func newDeviceAllocator(ctx Context, n *structs.Node) *deviceAllocator { } } -func (da *deviceAllocator) Copy() *deviceAllocator { - accounter := da.DeviceAccounter.Copy() - allocator := &deviceAllocator{accounter, da.ctx} +func (d *deviceAllocator) Copy() *deviceAllocator { + accounter := d.DeviceAccounter.Copy() + allocator := &deviceAllocator{accounter, d.ctx} return allocator } diff --git a/scheduler/reconciler/filters.go b/scheduler/reconciler/filters.go index d272ce207..2c2192b07 100644 --- a/scheduler/reconciler/filters.go +++ b/scheduler/reconciler/filters.go @@ -267,9 +267,9 @@ func (set allocSet) filterByTainted(state ClusterState) (untainted, migrate, los // filterOutByClientStatus returns a new allocSet containing allocs that don't // have the specified client status -func (a allocSet) filterOutByClientStatus(clientStatuses ...string) allocSet { +func (set allocSet) filterOutByClientStatus(clientStatuses ...string) allocSet { allocs := make(allocSet) - for _, alloc := range a { + for _, alloc := range set { if !slices.Contains(clientStatuses, alloc.ClientStatus) { allocs[alloc.ID] = alloc } @@ -280,9 +280,9 @@ func (a allocSet) filterOutByClientStatus(clientStatuses ...string) allocSet { // filterByClientStatus returns a new allocSet containing allocs that have the // specified client status -func (a allocSet) filterByClientStatus(clientStatus string) allocSet { +func (set allocSet) filterByClientStatus(clientStatus string) allocSet { allocs := make(allocSet) - for _, alloc := range a { + for _, alloc := range set { if alloc.ClientStatus == clientStatus { allocs[alloc.ID] = alloc } @@ -450,9 +450,9 @@ func updateByReschedulable(alloc *structs.Allocation, now time.Time, evalID stri // delayByStopAfter returns a delay for any lost allocation that's got 
a // disconnect.stop_on_client_after configured -func (a allocSet) delayByStopAfter() (later []*delayedRescheduleInfo) { +func (set allocSet) delayByStopAfter() (later []*delayedRescheduleInfo) { now := time.Now().UTC() - for _, a := range a { + for _, a := range set { if !a.ShouldClientStop() { continue } @@ -472,10 +472,10 @@ func (a allocSet) delayByStopAfter() (later []*delayedRescheduleInfo) { // delayByLostAfter returns a delay for any unknown allocation // that has disconnect.lost_after configured -func (a allocSet) delayByLostAfter(now time.Time) ([]*delayedRescheduleInfo, error) { +func (set allocSet) delayByLostAfter(now time.Time) ([]*delayedRescheduleInfo, error) { var later []*delayedRescheduleInfo - for _, alloc := range a { + for _, alloc := range set { timeout := alloc.DisconnectTimeout(now) if !timeout.After(now) { return nil, errors.New("unable to computing disconnecting timeouts") diff --git a/scheduler/tests/testing.go b/scheduler/tests/testing.go index 21587951f..13bc9e527 100644 --- a/scheduler/tests/testing.go +++ b/scheduler/tests/testing.go @@ -316,12 +316,14 @@ func (h *Harness) SetNoSubmit() { h.noSubmit = true } -// helper method to create allocations with given jobs and resources +// CreateAlloc is helper method to create allocations with given jobs and +// resources func CreateAlloc(id string, job *structs.Job, resource *structs.Resources) *structs.Allocation { return CreateAllocInner(id, job, resource, nil, nil) } -// helper method to create allocation with network at the task group level +// CreateAllocWithTaskgroupNetwork is is helper method to create allocation with +// network at the task group level func CreateAllocWithTaskgroupNetwork(id string, job *structs.Job, resource *structs.Resources, tgNet *structs.NetworkResource) *structs.Allocation { return CreateAllocInner(id, job, resource, nil, tgNet) } diff --git a/testutil/tls.go b/testutil/tls.go index db44d4480..6855f4aee 100644 --- a/testutil/tls.go +++ b/testutil/tls.go @@ 
-13,7 +13,8 @@ import ( "github.com/shoenig/test/must" ) -// Assert CA file exists and is a valid CA Returns the CA +// IsValidCertificate asserts the CA file exists and is a valid CA Returns the +// CA func IsValidCertificate(t *testing.T, caPath string) *x509.Certificate { t.Helper() @@ -28,7 +29,8 @@ func IsValidCertificate(t *testing.T, caPath string) *x509.Certificate { return ca } -// Assert key file exists and is a valid signer returns a bool +// IsValidSigner asserts the key file exists and is a valid signer returns a +// bool func IsValidSigner(t *testing.T, keyPath string) bool { t.Helper() diff --git a/testutil/wait.go b/testutil/wait.go index 2a51c6b90..ecf911b9a 100644 --- a/testutil/wait.go +++ b/testutil/wait.go @@ -335,7 +335,7 @@ func WaitForRunning(t testing.TB, rpc rpcFn, job *structs.Job) []*structs.AllocL return WaitForRunningWithToken(t, rpc, job, "") } -// WaitforJobAllocStatus blocks until the ClientStatus of allocations for a job +// WaitForJobAllocStatus blocks until the ClientStatus of allocations for a job // match the expected map of : . func WaitForJobAllocStatus(t testing.TB, rpc rpcFn, job *structs.Job, allocStatus map[string]int) { t.Helper() @@ -387,7 +387,7 @@ func WaitForJobAllocStatusWithToken(t testing.TB, rpc rpcFn, job *structs.Job, a return allocs } -// WaitforJobEvalStatus blocks until the job's evals match the status described +// WaitForJobEvalStatus blocks until the job's evals match the status described // in the map of : . 
func WaitForJobEvalStatus(t testing.TB, rpc rpcFn, job *structs.Job, evalStatus map[string]int) []*structs.Evaluation { return WaitForJobEvalStatusWithToken(t, rpc, job, evalStatus, "") diff --git a/version/version.go b/version/version.go index 9e678a609..42bac3b3b 100644 --- a/version/version.go +++ b/version/version.go @@ -71,38 +71,38 @@ func GetVersion() *VersionInfo { } } -func (c *VersionInfo) VersionNumber() string { - version := c.Version +func (v *VersionInfo) VersionNumber() string { + version := v.Version - if c.VersionPrerelease != "" { - version = fmt.Sprintf("%s-%s", version, c.VersionPrerelease) + if v.VersionPrerelease != "" { + version = fmt.Sprintf("%s-%s", version, v.VersionPrerelease) } - if c.VersionMetadata != "" { - version = fmt.Sprintf("%s+%s", version, c.VersionMetadata) + if v.VersionMetadata != "" { + version = fmt.Sprintf("%s+%s", version, v.VersionMetadata) } return version } -func (c *VersionInfo) FullVersionNumber(rev bool) string { +func (v *VersionInfo) FullVersionNumber(rev bool) string { var versionString bytes.Buffer - fmt.Fprintf(&versionString, "Nomad v%s", c.Version) - if c.VersionPrerelease != "" { - fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease) + fmt.Fprintf(&versionString, "Nomad v%s", v.Version) + if v.VersionPrerelease != "" { + fmt.Fprintf(&versionString, "-%s", v.VersionPrerelease) } - if c.VersionMetadata != "" { - fmt.Fprintf(&versionString, "+%s", c.VersionMetadata) + if v.VersionMetadata != "" { + fmt.Fprintf(&versionString, "+%s", v.VersionMetadata) } - if !c.BuildDate.IsZero() { - fmt.Fprintf(&versionString, "\nBuildDate %s", c.BuildDate.Format(time.RFC3339)) + if !v.BuildDate.IsZero() { + fmt.Fprintf(&versionString, "\nBuildDate %s", v.BuildDate.Format(time.RFC3339)) } - if rev && c.Revision != "" { - fmt.Fprintf(&versionString, "\nRevision %s", c.Revision) + if rev && v.Revision != "" { + fmt.Fprintf(&versionString, "\nRevision %s", v.Revision) } return versionString.String() From 
26554e544e2660b06d4153ddb64cd21688e851ed Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Fri, 25 Jul 2025 08:21:37 -0400 Subject: [PATCH 02/27] scheduler: move result mutation into `computeUpdates` (#26336) The `computeUpdate` method returns 4 different values, some of which are just different shapes of the same data and only ever get used to be applied to the result in the caller. Move the mutation of the result into `computeUpdates` to match the work done in #26325. Clean up the return signature so that only slices we need downstream are returned, and fix the incorrect docstring. Also fix a silent bug where the `inplace` set includes the original alloc and not the updated version. This has no functional change because all existing callers only ever look at the length of this slice, but it will prevent future bugs if that ever changes. Ref: https://github.com/hashicorp/nomad/pull/26325 Ref: https://hashicorp.atlassian.net/browse/NMD-819 --- scheduler/reconciler/reconcile_cluster.go | 36 +++++++++++------------ 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/scheduler/reconciler/reconcile_cluster.go b/scheduler/reconciler/reconcile_cluster.go index 4d526eb6e..f870fdc5b 100644 --- a/scheduler/reconciler/reconcile_cluster.go +++ b/scheduler/reconciler/reconcile_cluster.go @@ -544,12 +544,7 @@ func (a *AllocReconciler) computeGroup(group string, all allocSet) (*ReconcileRe // Do inplace upgrades where possible and capture the set of upgrades that // need to be done destructively. 
- var inplaceUpdateResult []*structs.Allocation - ignoreUpdates, inplace, inplaceUpdateResult, destructive := a.computeUpdates(tg, untainted) - result.InplaceUpdate = inplaceUpdateResult - - result.DesiredTGUpdates[group].Ignore += uint64(len(ignoreUpdates)) - result.DesiredTGUpdates[group].InPlaceUpdate += uint64(len(inplace)) + inplace, destructive := a.computeUpdates(untainted, tg, result) if !existingDeployment { dstate.DesiredTotal += len(destructive) + len(inplace) } @@ -1361,17 +1356,16 @@ func (a *AllocReconciler) reconcileReconnecting(reconnecting allocSet, all alloc } // computeUpdates determines which allocations for the passed group require -// updates. Three groups are returned: -// 1. Those that require no upgrades -// 2. Those that can be upgraded in-place. These are added to the results -// automatically since the function contains the correct state to do so, -// 3. Those that require destructive updates -func (a *AllocReconciler) computeUpdates(group *structs.TaskGroup, untainted allocSet) ( - ignore, inplaceUpdateMap allocSet, inplaceUpdateSlice []*structs.Allocation, destructive allocSet) { - // Determine the set of allocations that need to be updated - ignore = make(allocSet) - inplaceUpdateMap = make(allocSet) - inplaceUpdateSlice = make([]*structs.Allocation, 0) +// updates. This method updates the results with allocs to ignore and/or +// update. And two groups are returned: +// 1. Those that can be upgraded in-place +// 2. 
Those that require destructive updates +func (a *AllocReconciler) computeUpdates( + untainted allocSet, group *structs.TaskGroup, result *ReconcileResults, +) (inplace, destructive allocSet) { + + ignore := make(allocSet) + inplace = make(allocSet) destructive = make(allocSet) for _, alloc := range untainted { @@ -1381,10 +1375,14 @@ func (a *AllocReconciler) computeUpdates(group *structs.TaskGroup, untainted all } else if destructiveChange { destructive[alloc.ID] = alloc } else { - inplaceUpdateMap[alloc.ID] = alloc - inplaceUpdateSlice = append(inplaceUpdateSlice, inplaceAlloc) + inplace[alloc.ID] = inplaceAlloc } } + + result.InplaceUpdate = slices.Collect(maps.Values(inplace)) + result.DesiredTGUpdates[group.Name].Ignore += uint64(len(ignore)) + result.DesiredTGUpdates[group.Name].InPlaceUpdate += uint64(len(inplace)) + return } From f2417ffb89713300f432f30f63ae006f9c0e92fb Mon Sep 17 00:00:00 2001 From: James Rasell Date: Mon, 28 Jul 2025 15:15:33 +0200 Subject: [PATCH 03/27] ci: Update hclogvet and correctly run across codebase. (#26362) --- GNUmakefile | 4 ++-- client/allocrunner/networking_cni.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index 346d1069a..c6125acd8 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -145,7 +145,7 @@ lint-deps: ## Install linter dependencies @echo "==> Updating linter dependencies..." go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.3.0 go install github.com/client9/misspell/cmd/misspell@v0.3.4 - go install github.com/hashicorp/go-hclog/hclogvet@feaf6d2ec20fd895e711195c99e3fde93a68afc5 + go install github.com/hashicorp/go-hclog/hclogvet@bd6194f1f5b126dbad2a3fdf3b9b6556cc3496c3 .PHONY: git-hooks git-dir = $(shell git rev-parse --git-dir) @@ -163,7 +163,7 @@ check: ## Lint the source code @cd ./api && golangci-lint run --config ../.golangci.yml --build-tags "$(GO_TAGS)" @echo "==> Linting hclog statements..." - @hclogvet . + @hclogvet ./... 
@echo "==> Spell checking website..." @misspell -error -source=text website/content/ diff --git a/client/allocrunner/networking_cni.go b/client/allocrunner/networking_cni.go index 7a8663501..ae31d68ec 100644 --- a/client/allocrunner/networking_cni.go +++ b/client/allocrunner/networking_cni.go @@ -594,10 +594,10 @@ func (c *cniNetworkConfigurator) Teardown(ctx context.Context, alloc *structs.Al // best effort cleanup ipv6 ipt, iptErr := c.newIPTables(structs.NodeNetworkAF_IPv6) if iptErr != nil { - c.logger.Debug("failed to detect ip6tables: %v", iptErr) + c.logger.Debug("failed to detect ip6tables", "error", iptErr) } else { if err := c.forceCleanup(ipt, alloc.ID); err != nil { - c.logger.Warn("ip6tables: %v", err) + c.logger.Warn("failed to cleanup iptables", "error", err) } } From 5bc5f4f9f12244d2228ec6d57102ff79ef5601b3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Jul 2025 10:02:27 -0400 Subject: [PATCH 04/27] chore(deps): bump github.com/aws/aws-sdk-go-v2/config (#26358) Bumps [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) from 1.29.17 to 1.29.18. - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.29.17...config/v1.29.18) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-version: 1.29.18 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 16 ++++++++-------- go.sum | 32 ++++++++++++++++---------------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/go.mod b/go.mod index 2b6863176..38346c335 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/Masterminds/sprig/v3 v3.3.0 github.com/Microsoft/go-winio v0.6.2 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e - github.com/aws/aws-sdk-go-v2/config v1.29.17 + github.com/aws/aws-sdk-go-v2/config v1.29.18 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33 github.com/aws/smithy-go v1.22.4 github.com/container-storage-interface/spec v1.11.0 @@ -184,17 +184,17 @@ require ( github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go v1.55.6 // indirect github.com/aws/aws-sdk-go-v2 v1.36.6 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.70 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.71 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.37 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0 // indirect github.com/aws/aws-sdk-go-v2/service/ecs v1.53.8 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.18 // indirect + github.com/aws/aws-sdk-go-v2/service/sso 
v1.25.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.34.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bgentry/speakeasy v0.1.0 // indirect diff --git a/go.sum b/go.sum index aa4423cf0..cc5f4e0bc 100644 --- a/go.sum +++ b/go.sum @@ -733,16 +733,16 @@ github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v1.36.6 h1:zJqGjVbRdTPojeCGWn5IR5pbJwSQSBh5RWFTQcEQGdU= github.com/aws/aws-sdk-go-v2 v1.36.6/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0= -github.com/aws/aws-sdk-go-v2/config v1.29.17 h1:jSuiQ5jEe4SAMH6lLRMY9OVC+TqJLP5655pBGjmnjr0= -github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8= -github.com/aws/aws-sdk-go-v2/credentials v1.17.70 h1:ONnH5CM16RTXRkS8Z1qg7/s2eDOhHhaXVd72mmyv4/0= -github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc= +github.com/aws/aws-sdk-go-v2/config v1.29.18 h1:x4T1GRPnqKV8HMJOMtNktbpQMl3bIsfx8KbqmveUO2I= +github.com/aws/aws-sdk-go-v2/config v1.29.18/go.mod h1:bvz8oXugIsH8K7HLhBv06vDqnFv3NsGDt2Znpk7zmOU= +github.com/aws/aws-sdk-go-v2/credentials v1.17.71 h1:r2w4mQWnrTMJjOyIsZtGp3R3XGY3nqHn8C26C2lQWgA= +github.com/aws/aws-sdk-go-v2/credentials v1.17.71/go.mod h1:E7VF3acIup4GB5ckzbKFrCK0vTvEQxOxgdq4U3vcMCY= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33 h1:D9ixiWSG4lyUBL2DDNK924Px9V/NBVpML90MHqyTADY= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33/go.mod h1:caS/m4DI+cij2paz3rtProRBI4s/+TCiWoaWZuQ9010= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 h1:SsytQyTMHMDPspp+spo7XwXTP44aJZZAC7fBV2C5+5s= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36/go.mod 
h1:Q1lnJArKRXkenyog6+Y+zr7WDpk4e6XlR6gs20bbeNo= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 h1:i2vNHQiXUvKhs3quBR6aqlgJaiaexz/aNvdCktW/kAM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36/go.mod h1:UdyGa7Q91id/sdyHPwth+043HhmP6yP9MBHgbZM0xo8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37 h1:osMWfm/sC/L4tvEdQ65Gri5ZZDCUpuYJZbTTDrsn4I0= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37/go.mod h1:ZV2/1fbjOPr4G4v38G3Ww5TBT4+hmsK45s/rxu1fGy0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.37 h1:v+X21AvTb2wZ+ycg1gx+orkB/9U6L7AOp93R7qYxsxM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.37/go.mod h1:G0uM1kyssELxmJ2VZEfG0q2npObR3BAkF3c1VsfVnfs= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0 h1:3hH6o7Z2WeE1twvz44Aitn6Qz8DZN3Dh5IB4Eh2xq7s= @@ -751,14 +751,14 @@ github.com/aws/aws-sdk-go-v2/service/ecs v1.53.8 h1:v1OectQdV/L+KSFSiqK00fXGN8Fb github.com/aws/aws-sdk-go-v2/service/ecs v1.53.8/go.mod h1:F0DbgxpvuSvtYun5poG67EHLvci4SgzsMVO6SsPUqKk= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 h1:CXV68E2dNqhuynZJPB80bhPQwAKqBWVer887figW6Jc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4/go.mod h1:/xFi9KtvBXP97ppCz1TAEvU1Uf66qvid89rbem3wCzQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 h1:t0E6FzREdtCsiLIoLCWsYliNsRBgyGD/MCK571qk4MI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17/go.mod h1:ygpklyoaypuyDvOM5ujWGrYWpAK3h7ugnmKCU/76Ys4= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 
h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E= -github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 h1:NFOJ/NXEGV4Rq//71Hs1jC/NvPs1ezajK+yQmkwnPV0= -github.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.18 h1:vvbXsA2TVO80/KT7ZqCbx934dt6PY+vQ8hZpUZ/cpYg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.18/go.mod h1:m2JJHledjBGNMsLOF1g9gbAxprzq3KjC8e4lxtn+eWg= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.6 h1:rGtWqkQbPk7Bkwuv3NzpE/scwwL9sC1Ul3tn9x83DUI= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.6/go.mod h1:u4ku9OLv4TO4bCPdxf4fA1upaMaJmP9ZijGk3AAOC6Q= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4 h1:OV/pxyXh+eMA0TExHEC4jyWdumLxNbzz1P0zJoezkJc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4/go.mod h1:8Mm5VGYwtm+r305FfPSuc+aFkrypeylGYhFim6XEPoc= +github.com/aws/aws-sdk-go-v2/service/sts v1.34.1 h1:aUrLQwJfZtwv3/ZNG2xRtEen+NqI3iesuacjP51Mv1s= +github.com/aws/aws-sdk-go-v2/service/sts v1.34.1/go.mod h1:3wFBZKoWnX3r+Sm7in79i54fBmNfwhdNdQuscCw7QIk= github.com/aws/smithy-go v1.22.4 h1:uqXzVZNuNexwc/xrh6Tb56u89WDlJY6HS+KC0S4QSjw= github.com/aws/smithy-go v1.22.4/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= From e561bdb476d09a37f15635a4b47772c9228adc1a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Jul 2025 10:02:59 -0400 Subject: [PATCH 05/27] chore(deps): bump github.com/hashicorp/consul-template (#26356) Bumps [github.com/hashicorp/consul-template](https://github.com/hashicorp/consul-template) from 0.41.0 to 0.41.1. 
- [Release notes](https://github.com/hashicorp/consul-template/releases) - [Changelog](https://github.com/hashicorp/consul-template/blob/v0.41.1/CHANGELOG.md) - [Commits](https://github.com/hashicorp/consul-template/compare/v0.41.0...v0.41.1) --- updated-dependencies: - dependency-name: github.com/hashicorp/consul-template dependency-version: 0.41.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 18 +++++++++--------- go.sum | 55 ++++++++++++++++++------------------------------------- 2 files changed, 27 insertions(+), 46 deletions(-) diff --git a/go.mod b/go.mod index 38346c335..878a2481b 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/hashicorp/cap v0.9.0 github.com/hashicorp/cli v1.1.7 - github.com/hashicorp/consul-template v0.41.0 + github.com/hashicorp/consul-template v0.41.1 github.com/hashicorp/consul/api v1.32.1 github.com/hashicorp/consul/sdk v0.16.2 github.com/hashicorp/cronexpr v1.1.2 @@ -150,7 +150,7 @@ require ( cloud.google.com/go/longrunning v0.6.4 // indirect cloud.google.com/go/monitoring v1.23.0 // indirect cloud.google.com/go/storage v1.50.0 // indirect - dario.cat/mergo v1.0.1 // indirect + dario.cat/mergo v1.0.2 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 // indirect @@ -169,7 +169,7 @@ require ( github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 // indirect - github.com/BurntSushi/toml v1.3.2 // indirect + github.com/BurntSushi/toml v1.5.0 // indirect github.com/DataDog/datadog-go v3.2.0+incompatible // indirect 
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0 // indirect @@ -226,7 +226,7 @@ require ( github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-jose/go-jose/v4 v4.1.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect @@ -248,7 +248,7 @@ require ( github.com/hashicorp/go-discover/provider/gce v0.0.0-20241120163552-5eb1507d16b4 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-msgpack v1.1.6-0.20240304204939-8824e8ccc35f // indirect - github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-retryablehttp v0.7.8 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-safetemp v1.0.0 // indirect github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 // indirect @@ -259,7 +259,7 @@ require ( github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/mdns v1.0.5 // indirect github.com/hashicorp/raft-boltdb v0.0.0-20250113192317-e8660f88bcc9 // indirect - github.com/hashicorp/vault/api/auth/kubernetes v0.5.0 // indirect + github.com/hashicorp/vault/api/auth/kubernetes v0.10.0 // indirect github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 // indirect github.com/huandu/xstrings v1.5.0 // indirect github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect @@ -334,12 +334,12 @@ require ( go.opentelemetry.io/otel/sdk v1.35.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect go.opentelemetry.io/otel/trace v1.35.0 // indirect - golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 // indirect - golang.org/x/net v0.41.0 // indirect + 
golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc // indirect + golang.org/x/net v0.42.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/term v0.33.0 // indirect golang.org/x/text v0.27.0 // indirect - golang.org/x/tools v0.34.0 // indirect + golang.org/x/tools v0.35.0 // indirect google.golang.org/api v0.217.0 // indirect google.golang.org/genproto v0.0.0-20250115164207-1a7da9e5054f // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect diff --git a/go.sum b/go.sum index cc5f4e0bc..d770209c8 100644 --- a/go.sum +++ b/go.sum @@ -616,8 +616,8 @@ cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoIS cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= -dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= -dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= @@ -670,8 +670,8 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mo github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 h1:H5xDQaE3XowWfhZRUpnfC+rGZMEVoSiji+b+/HFAPU4= github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= @@ -724,7 +724,6 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= @@ -778,7 +777,6 @@ github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= -github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.1.2/go.mod 
h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= @@ -932,11 +930,10 @@ github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmn github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY= github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= -github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY= -github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= +github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -963,7 +960,6 @@ github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-test/deep v1.0.2/go.mod 
h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= @@ -1130,8 +1126,8 @@ github.com/hashicorp/cap v0.9.0 h1:B5IZT7VL1ruSCtVBXSIyWDpkAFiEZt4bQFk1e2WwCb0= github.com/hashicorp/cap v0.9.0/go.mod h1:J00roe8PFFYXfedm3WcO6sGVaKeYElmNOuqfi8Uero4= github.com/hashicorp/cli v1.1.7 h1:/fZJ+hNdwfTSfsxMBa9WWMlfjUZbX8/LnUxgAd7lCVU= github.com/hashicorp/cli v1.1.7/go.mod h1:e6Mfpga9OCT1vqzFuoGZiiF/KaG9CbUfO5s3ghU3YgU= -github.com/hashicorp/consul-template v0.41.0 h1:yPrJQLI5SHKmvMcWnkMfm4deNUd3ZjefjmFiESkLQ50= -github.com/hashicorp/consul-template v0.41.0/go.mod h1:we3omhscaVJyMrWZUA8LA98brn+YqaNgZQazjy7xyQk= +github.com/hashicorp/consul-template v0.41.1 h1:6VM6kzyBt7xpHfeSjuSRFO6X0/pPdIy+AJpNlM8PekM= +github.com/hashicorp/consul-template v0.41.1/go.mod h1:RUPYCBLEnJVxzkJ2O52A20rGEx/K2oz16mPiuRCUWZA= github.com/hashicorp/consul/api v1.32.1 h1:0+osr/3t/aZNAdJX558crU3PEjVrG4x6715aZHRgceE= github.com/hashicorp/consul/api v1.32.1/go.mod h1:mXUWLnxftwTmDv4W3lzxYCPD199iNLLUyLfLGFJbtl4= github.com/hashicorp/consul/sdk v0.16.2 h1:cGX/djeEe9r087ARiKVWwVWCF64J+yW0G6ftZMZYbj0= @@ -1146,7 +1142,6 @@ github.com/hashicorp/go-bexpr v0.1.14/go.mod h1:gN7hRKB3s7yT+YvTdnhZVLTENejvhlkZ github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= 
github.com/hashicorp/go-connlimit v0.3.1 h1:v5A31V0FfXNYAtWP6BFtRhs8Nhr650a1HJmwnQ2pM7U= @@ -1164,7 +1159,6 @@ github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMC github.com/hashicorp/go-getter v1.7.8 h1:mshVHx1Fto0/MydBekWan5zUipGq7jO0novchgMmSiY= github.com/hashicorp/go-getter v1.7.8/go.mod h1:2c6CboOEb9jG6YvmC9xdD+tyAFsrUaJPedwXDGr0TM4= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= @@ -1201,9 +1195,8 @@ github.com/hashicorp/go-netaddrs v0.1.0/go.mod h1:33+a/emi5R5dqRspOuZKO0E+Tuz5WV github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0UUrwg= github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= -github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= +github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= @@ -1212,12 +1205,10 @@ 
github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 h1:W9WN8p6moV1fjKLkeqEgkAMu github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6/go.mod h1:MpCPSPGLDILGb4JMm94/mMi3YysIqsXzGCzkEZjcjXg= github.com/hashicorp/go-secure-stdlib/listenerutil v0.1.10 h1:2iDz+t0JLl1W0tJhvmhsh/UBgT1JgC8Qxz8HxYMWXQo= github.com/hashicorp/go-secure-stdlib/listenerutil v0.1.10/go.mod h1:eZkXE+osawMrAWR4wJRmyKauUwH6mNGbjFuiDujnbPk= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.9 h1:FW0YttEnUNDJ2WL9XcrrfteS1xW8u+sh4ggM8pN5isQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.9/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 h1:SMGUnbpAcat8rIKHkBPjfv81yC46a8eCNZ2hsR2l1EI= github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1/go.mod h1:Ch/bf00Qnx77MZd49JRgHYqHQjtEmTgGU2faufpVZb0= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.3 h1:xbrxd0U9XQW8qL1BAz2XrAjAF/P2vcqUTAues9c24B8= @@ -1226,7 +1217,6 @@ github.com/hashicorp/go-set/v2 v2.1.0 h1:iERPCQWks+I+4bTgy0CT2myZsCqNgBg79ZHqwni github.com/hashicorp/go-set/v2 v2.1.0/go.mod h1:6q4nh8UCVZODn2tJ5RbJi8+ki7pjZBsAEYGt6yaGeTo= github.com/hashicorp/go-set/v3 v3.0.0 h1:CaJBQvQCOWoftrBcDt7Nwgo0kdpmrKxar/x2o6pV9JA= github.com/hashicorp/go-set/v3 v3.0.0/go.mod h1:IEghM2MpE5IaNvL+D7X480dfNtxjRXZ6VMpK3C8s2ok= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= github.com/hashicorp/go-sockaddr v1.0.7/go.mod 
h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= @@ -1269,11 +1259,10 @@ github.com/hashicorp/raft-boltdb/v2 v2.3.1 h1:ackhdCNPKblmOhjEU9+4lHSJYFkJd6Jqyv github.com/hashicorp/raft-boltdb/v2 v2.3.1/go.mod h1:n4S+g43dXF1tqDT+yzcXHhXM6y7MrlUd3TTwGRcUvQE= github.com/hashicorp/serf v0.10.2 h1:m5IORhuNSjaxeljg5DeQVDlQyVkhRIjJDimbkCa8aAc= github.com/hashicorp/serf v0.10.2/go.mod h1:T1CmSGfSeGfnfNy/w0odXQUR1rfECGd2Qdsp84DjOiY= -github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= github.com/hashicorp/vault/api v1.20.0 h1:KQMHElgudOsr+IbJgmbjHnCTxEpKs9LnozA1D3nozU4= github.com/hashicorp/vault/api v1.20.0/go.mod h1:GZ4pcjfzoOWpkJ3ijHNpEoAxKEsBJnVljyTe3jM2Sms= -github.com/hashicorp/vault/api/auth/kubernetes v0.5.0 h1:CXO0fD7M3iCGovP/UApeHhPcH4paDFKcu7AjEXi94rI= -github.com/hashicorp/vault/api/auth/kubernetes v0.5.0/go.mod h1:afrElBIO9Q4sHFVuVWgNevG4uAs1bT2AZFA9aEiI608= +github.com/hashicorp/vault/api/auth/kubernetes v0.10.0 h1:5rqWmUFxnu3S7XYq9dafURwBgabYDFzo2Wv+AMopPHs= +github.com/hashicorp/vault/api/auth/kubernetes v0.10.0/go.mod h1:cZZmhF6xboMDmDbMY52oj2DKW6gS0cQ9g0pJ5XIXQ5U= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= @@ -1364,7 +1353,6 @@ github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= 
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -1387,7 +1375,6 @@ github.com/miekg/dns v1.1.67 h1:kg0EHj0G4bfT5/oOys6HhZw4vmMlnoZ+gDu8tJ/AlI0= github.com/miekg/dns v1.1.67/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= @@ -1401,7 +1388,6 @@ github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-testing-interface v1.14.2-0.20210821155943-2d9075ca8770 h1:drhDO54gdT/a15GBcMRmunZiNcLgPiFIJa23KzmcvcU= github.com/mitchellh/go-testing-interface v1.14.2-0.20210821155943-2d9075ca8770/go.mod h1:SO/iHr6q2EzbqRApt+8/E9wqebTwQn5y+UlB04bxzo0= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= @@ -1495,7 +1481,6 @@ github.com/planetscale/vtprotobuf 
v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= @@ -1552,7 +1537,6 @@ github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.2+incompatible h1:C89EOx/XBWwIXl8wm8OPJBd7kPF25UfsK2X7Ph/zCAk= github.com/ryanuber/columnize v2.1.2+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= @@ -1715,7 +1699,6 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1746,8 +1729,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 h1:y5zboxd6LQAqYIhHnB48p0ByQ/GnQx2BE33L8BOHQkI= -golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ= +golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc h1:TS73t7x3KarrNd5qAipmspBDS1rkMcgVG/fS1aRb4Rc= +golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1863,8 +1846,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= 
-golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= +golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1918,7 +1901,6 @@ golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2070,7 +2052,6 @@ golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2144,8 +2125,8 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From fe42c5bab0025a71be19ce704b9d49c04b3cb440 Mon Sep 17 00:00:00 2001 From: James Rasell Date: Mon, 28 Jul 2025 16:53:40 +0200 Subject: [PATCH 06/27] ci: Revert hclogvet running across entire codebase. (#26365) It seems the tool requires a little attention and does not run well across our enterprise codebase. Rolling back that makefile change, so it does not stop enterprise work, backport, CI, etc. 
--- GNUmakefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GNUmakefile b/GNUmakefile index c6125acd8..20474f6d6 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -163,7 +163,7 @@ check: ## Lint the source code @cd ./api && golangci-lint run --config ../.golangci.yml --build-tags "$(GO_TAGS)" @echo "==> Linting hclog statements..." - @hclogvet ./... + @hclogvet . @echo "==> Spell checking website..." @misspell -error -source=text website/content/ From a90f82bd0f9ba5ac580ec0a924633dbc3f3e36cc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Jul 2025 11:00:15 -0400 Subject: [PATCH 07/27] chore(deps): bump github.com/aws/smithy-go from 1.22.4 to 1.22.5 (#26355) Bumps [github.com/aws/smithy-go](https://github.com/aws/smithy-go) from 1.22.4 to 1.22.5. - [Release notes](https://github.com/aws/smithy-go/releases) - [Changelog](https://github.com/aws/smithy-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/smithy-go/compare/v1.22.4...v1.22.5) --- updated-dependencies: - dependency-name: github.com/aws/smithy-go dependency-version: 1.22.5 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 878a2481b..bdeb4eba8 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e github.com/aws/aws-sdk-go-v2/config v1.29.18 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33 - github.com/aws/smithy-go v1.22.4 + github.com/aws/smithy-go v1.22.5 github.com/container-storage-interface/spec v1.11.0 github.com/containerd/errdefs v1.0.0 github.com/containerd/go-cni v1.1.12 diff --git a/go.sum b/go.sum index d770209c8..031b98873 100644 --- a/go.sum +++ b/go.sum @@ -758,8 +758,8 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4 h1:OV/pxyXh+eMA0TExHEC4jyWd github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4/go.mod h1:8Mm5VGYwtm+r305FfPSuc+aFkrypeylGYhFim6XEPoc= github.com/aws/aws-sdk-go-v2/service/sts v1.34.1 h1:aUrLQwJfZtwv3/ZNG2xRtEen+NqI3iesuacjP51Mv1s= github.com/aws/aws-sdk-go-v2/service/sts v1.34.1/go.mod h1:3wFBZKoWnX3r+Sm7in79i54fBmNfwhdNdQuscCw7QIk= -github.com/aws/smithy-go v1.22.4 h1:uqXzVZNuNexwc/xrh6Tb56u89WDlJY6HS+KC0S4QSjw= -github.com/aws/smithy-go v1.22.4/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= +github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From d418260b6d5bdc1d87d286e9178f2282777dcf23 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Jul 2025 11:27:49 -0400 Subject: 
[PATCH 08/27] chore(deps): bump google.golang.org/grpc from 1.73.0 to 1.74.2 (#26357) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.73.0 to 1.74.2. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.73.0...v1.74.2) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-version: 1.74.2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 26 +++++++++++++------------- go.sum | 52 ++++++++++++++++++++++++++-------------------------- 2 files changed, 39 insertions(+), 39 deletions(-) diff --git a/go.mod b/go.mod index bdeb4eba8..4a4b752f0 100644 --- a/go.mod +++ b/go.mod @@ -132,7 +132,7 @@ require ( golang.org/x/sync v0.16.0 golang.org/x/sys v0.34.0 golang.org/x/time v0.12.0 - google.golang.org/grpc v1.73.0 + google.golang.org/grpc v1.74.2 google.golang.org/protobuf v1.36.6 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 oss.indeed.com/go/libtime v1.6.0 @@ -140,11 +140,11 @@ require ( ) require ( - cel.dev/expr v0.23.0 // indirect + cel.dev/expr v0.24.0 // indirect cloud.google.com/go v0.118.0 // indirect cloud.google.com/go/auth v0.14.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect - cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/compute/metadata v0.7.0 // indirect cloud.google.com/go/iam v1.3.1 // indirect cloud.google.com/go/kms v1.20.5 // indirect cloud.google.com/go/longrunning v0.6.4 // indirect @@ -207,7 +207,7 @@ require ( github.com/cilium/ebpf v0.16.0 // indirect github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible // indirect github.com/circonus-labs/circonusllhist v0.1.3 // indirect - github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect 
github.com/containerd/console v1.0.4 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect @@ -227,7 +227,7 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-jose/go-jose/v4 v4.1.1 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect @@ -325,15 +325,15 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zeebo/errs v1.4.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel v1.36.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/sdk v1.35.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect + go.opentelemetry.io/otel/sdk v1.36.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect + go.opentelemetry.io/otel/trace v1.36.0 // indirect golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc // indirect golang.org/x/net v0.42.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect @@ -342,8 +342,8 @@ require ( golang.org/x/tools v0.35.0 // indirect google.golang.org/api v0.217.0 // indirect google.golang.org/genproto v0.0.0-20250115164207-1a7da9e5054f // indirect - google.golang.org/genproto/googleapis/api 
v0.0.0-20250324211829-b45e905df463 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/resty.v1 v1.12.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 031b98873..f4b9d2393 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.23.0 h1:wUb94w6OYQS4uXraxo9U+wUAs9jT47Xvl4iPgAwM2ss= -cel.dev/expr v0.23.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -184,8 +184,8 @@ cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= 
cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -816,8 +816,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f h1:C5bqEmzEPLsHm9Mv73lSE9e9bKV23aB1vxOsmZrkl3k= -github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/container-storage-interface/spec v1.11.0 h1:H/YKTOeUZwHtyPOr9raR+HgFmGluGCklulxDYxSdVNM= github.com/container-storage-interface/spec v1.11.0/go.mod h1:DtUvaQszPml1YJfIK7c00mlv6/g4wNMLanLgiUbKFRI= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= @@ -945,8 +945,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= 
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= @@ -1654,15 +1654,15 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/detectors/gcp v1.35.0 h1:bGvFt68+KTiAKFlacHW6AhA56GF2rS0bdD3aJYEnmzA= -go.opentelemetry.io/contrib/detectors/gcp v1.35.0/go.mod h1:qGWP8/+ILwMRIUf9uIVLloR1uo5ZYAslM4O6OqUi1DA= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod 
h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 h1:R/OBkMoGgfy2fLhs2QhkCI1w4HLEQX92GCcJB6SSdNk= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 h1:giGm8w67Ja7amYNfYMdme7xSp2pIxThWopw8+QP51Yk= @@ -1671,16 +1671,16 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0 h1:Ydage/ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= -go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod 
h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -2344,10 +2344,10 @@ google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto v0.0.0-20250115164207-1a7da9e5054f h1:387Y+JbxF52bmesc8kq1NyYIp33dnxCw6eiA7JMsTmw= google.golang.org/genproto v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:0joYwWwLQh18AOj8zMYeZLjzuqcYTU3/nC5JdCvC3JI= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a h1:SGktgSolFCo75dnHJF2yMvnns6jCmHFJ0vE4Vn2JKvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a/go.mod 
h1:a77HrdMjoeKbnd2jmgcWdaS++ZLZAEq3orIOAEIKiVw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -2389,8 +2389,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= +google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= +google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= From 6e5ecb6bb047c62eb4392beecb48d0f10ec85c7c Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 28 Jul 2025 12:03:30 -0400 Subject: [PATCH 09/27] E2E: update Consul/Vault compat versions tested (#26369) Update our E2E compatibility test for Consul and Vault to only include back to the oldest-supported LTS versions of Consul and Vault. 
This will still leave a few unsupported non-LTS versions in the matrix between the two oldest LTS, but this is a small number of tests and fixing it would mean hard-coding the LTS support matrix in our tests. --- e2e/consulcompat/shared_download_test.go | 2 +- e2e/vaultcompat/vaultcompat_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e/consulcompat/shared_download_test.go b/e2e/consulcompat/shared_download_test.go index cd909d285..d337fad48 100644 --- a/e2e/consulcompat/shared_download_test.go +++ b/e2e/consulcompat/shared_download_test.go @@ -21,7 +21,7 @@ import ( const ( binDir = "consul-bins" - minConsulVersion = "1.16.0" + minConsulVersion = "1.18.0" // oldest supported LTS // environment variable to pick only one Consul version for testing exactConsulVersionEnv = "NOMAD_E2E_CONSULCOMPAT_CONSUL_VERSION" diff --git a/e2e/vaultcompat/vaultcompat_test.go b/e2e/vaultcompat/vaultcompat_test.go index 93575be66..f4cf37f9f 100644 --- a/e2e/vaultcompat/vaultcompat_test.go +++ b/e2e/vaultcompat/vaultcompat_test.go @@ -292,7 +292,7 @@ func downloadVaultBuild(t *testing.T, b build) { } func getMinimumVersion(t *testing.T) *version.Version { - v, err := version.NewVersion("1.11.0") + v, err := version.NewVersion("1.16.0") // oldest supported LTS must.NoError(t, err) return v } From 513ec0248695f7fbbcd567e3099877b48b132496 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 28 Jul 2025 13:48:01 -0400 Subject: [PATCH 10/27] docs: explain access modes for CSI and DHV volumes (#26352) The documentation for CSI and DHV has a list of the available access modes, but doesn't explain what they mean in terms of what jobs can request, the scheduler behavior, or the CSI plugin behavior. Expand on the information available in the CSI specification and provide a description of DHV's behavior as well. 
Ref: https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume --- .../volume/capability.mdx | 33 ++++++++++++------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/website/content/docs/other-specifications/volume/capability.mdx b/website/content/docs/other-specifications/volume/capability.mdx index 652fe0015..6dd3df44e 100644 --- a/website/content/docs/other-specifications/volume/capability.mdx +++ b/website/content/docs/other-specifications/volume/capability.mdx @@ -42,21 +42,32 @@ for each capability you intend to use in a job's [`volume`] block. - For CSI volumes the `access_mode` is required. Can be one of the following: - - `"single-node-reader-only"` - - `"single-node-writer"` - - `"multi-node-reader-only"` - - `"multi-node-single-writer"` - - `"multi-node-multi-writer"` + - `"single-node-reader-only"`: Jobs can only request the volume with + read-only access, and only one node can mount the volume at a time. + - `"single-node-writer"`: Jobs can request the volume with read/write or + read-only access, and only one node can mount the volume at a time. + - `"multi-node-reader-only"`: Jobs can only request the volume with + read-only access, but multiple nodes can mount the volume simultaneously. + - `"multi-node-single-writer"`: Jobs can request the volume with read/write + or read-only access, but the scheduler only allows one allocation to have + read/write access. Multiple nodes can mount the volume simultaneously. + - `"multi-node-multi-writer"`: Jobs can request the volume with read/write + or read-only access, and the scheduler allows multiple allocations to have + read/write access. Multiple nodes can mount the volume simultaneously. - Most CSI plugins support only single-node modes. - Consult the documentation of the storage provider and CSI plugin. + Most CSI plugins support only single-node modes. Consult the documentation + of the storage provider and CSI plugin. 
- For dynamic host volumes the `access_mode` is optional. Can be one of the following: - - `"single-node-writer"` - - `"single-node-reader-only"` - - `"single-node-single-writer"` - - `"single-node-multi-writer"` + - `"single-node-writer"`: Jobs can only request the volume with read/write access. + - `"single-node-reader-only"`: Jobs can only request the volume with read-only access. + - `"single-node-single-writer"`: Jobs can request either read/write or + read-only access, but the scheduler only allows one allocation to have + read/write access. + - `"single-node-multi-writer"`: Jobs can request either read/write or + read-only access, and the scheduler allows multiple allocations to have + read/write access. In the job specification, the default is `single-node-writer` unless `read_only = true`, which translates to `single-node-reader-only`. From 192dec4297113bb4ae12e320ea4b3892e54d9016 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 28 Jul 2025 13:48:23 -0400 Subject: [PATCH 11/27] docs: fix self-referencing link for raw_exec driver config (#26353) During the big docs rearchitecture, we split up the task driver pages into separate job declaration and driver configuration pages. The link for the `raw_exec` driver to the configuration page is a self-reference. --- website/content/docs/job-declare/task-driver/raw_exec.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/job-declare/task-driver/raw_exec.mdx b/website/content/docs/job-declare/task-driver/raw_exec.mdx index 690b60b10..105dc41ba 100644 --- a/website/content/docs/job-declare/task-driver/raw_exec.mdx +++ b/website/content/docs/job-declare/task-driver/raw_exec.mdx @@ -13,7 +13,7 @@ isolation. Further, the task is started as the same user as the Nomad process. As such, it should be used with extreme care and is disabled by default. 
Refer to [Configure the Raw Fork/Exec task -driver](/nomad/docs/job-declare/task-driver/raw_exec) for capabilities, client +driver](/nomad/docs/deploy/task-driver/raw_exec) for capabilities, client requirements, and plugin configuration. ## Task configuration From b286a8ee9cabb1dced8e6be7c3b60ff869cbca09 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 28 Jul 2025 13:48:38 -0400 Subject: [PATCH 12/27] docs: update Consul/Vault compatibility matrix (#26368) Update our support matrix to show currently-supported versions of Consul, Vault, and Nomad. --- website/content/docs/networking/consul/index.mdx | 12 ++++++------ website/content/docs/secure/vault/index.mdx | 12 +++++++----- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/website/content/docs/networking/consul/index.mdx b/website/content/docs/networking/consul/index.mdx index db23923c2..650e1bcf2 100644 --- a/website/content/docs/networking/consul/index.mdx +++ b/website/content/docs/networking/consul/index.mdx @@ -132,15 +132,15 @@ the [`consul.cluster`][] parameter. ## Compatibility All currently supported versions of Nomad are compatible with recent versions of -Consul, with some exceptions. +Consul. * Nomad is not compatible with Consul Data Plane. 
-| | Consul 1.17.0+ | Consul 1.18.0+ | Consul 1.19.0+ | -|-------------------|----------------|----------------|----------------| -| Nomad 1.8.0+ | ✅ | ✅ | ✅ | -| Nomad 1.7.0+ | ✅ | ✅ | ✅ | -| Nomad 1.6.0+ | ✅ | ✅ | ✅ | +| | Consul 1.19.0+ | Consul 1.20.0+ | Consul 1.21.0+ | +|---------------|----------------|----------------|----------------| +| Nomad 1.10.0+ | ✅ | ✅ | ✅ | +| Nomad 1.9.0+ | ✅ | ✅ | ✅ | +| Nomad 1.8.0+ | ✅ | ✅ | ✅ | [Automatic Clustering with Consul]: /nomad/docs/deploy/clusters/connect-nodes [CDP]: /consul/docs/connect/dataplane diff --git a/website/content/docs/secure/vault/index.mdx b/website/content/docs/secure/vault/index.mdx index 49eae9097..74492df1b 100644 --- a/website/content/docs/secure/vault/index.mdx +++ b/website/content/docs/secure/vault/index.mdx @@ -54,12 +54,14 @@ Jobs that need access to Vault may specify which Vault cluster to use with the ## Compatibility -* Nomad versions 1.4.0 and above are compatible with any currently supported - version of Vault. +All currently supported versions of Nomad are compatible with recent versions of +Vault. -| | Vault 1.13.0+ | -|--------------|---------------| -| Nomad 1.4.0+ | ✅ | +| | Vault 1.18.0+ | Vault 1.19.0+ | Vault 1.20.0+ | +|---------------|---------------|---------------|---------------| +| Nomad 1.10.0+ | ✅ | ✅ | ✅ | +| Nomad 1.9.0+ | ✅ | ✅ | ✅ | +| Nomad 1.8.0+ | ✅ | ✅ | ✅ | [Consul Template]: https://github.com/hashicorp/consul-template [Vault]: https://www.vaultproject.io/ 'Vault by HashiCorp' From 501608ca683980cb26f1b42bbb7a3f913485beaa Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 28 Jul 2025 14:12:43 -0400 Subject: [PATCH 13/27] docs: document handling of unset affinity/constraint values (#26354) Affinities and constraints use similar feasibility checking logic to determine if a given node matches (although affinities don't support all the same operators). Most operators don't allow `value` to be unset. Update the docs to reflect this. 
Fixes: https://github.com/hashicorp/nomad/issues/24983 --- website/content/docs/job-specification/affinity.mdx | 7 ++++--- website/content/docs/job-specification/constraint.mdx | 4 +++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/website/content/docs/job-specification/affinity.mdx b/website/content/docs/job-specification/affinity.mdx index b51f2fc31..6e0c81e4c 100644 --- a/website/content/docs/job-specification/affinity.mdx +++ b/website/content/docs/job-specification/affinity.mdx @@ -89,10 +89,11 @@ allocations. For a detailed explanation of these values and their behavior, please see the [operator values section](#operator-values). -- `value` `(string: "")` - Specifies the value to compare the attribute against - using the specified operation. This can be a literal value, another attribute, - or any [Nomad interpolated +- `value` `(string: )` - Specifies the value to compare the attribute + against using the specified operation. This can be a literal value, another + attribute, or any [Nomad interpolated values](/nomad/docs/reference/runtime-variable-interpolation#interpreted_node_vars). + The `value` field is required. - `weight` `(integer: 50)` - Specifies a weight for the affinity. The weight is used during scoring and must be an integer between -100 to 100. Negative weights act as diff --git a/website/content/docs/job-specification/constraint.mdx b/website/content/docs/job-specification/constraint.mdx index 548c1b58c..4a9ceb184 100644 --- a/website/content/docs/job-specification/constraint.mdx +++ b/website/content/docs/job-specification/constraint.mdx @@ -99,7 +99,9 @@ allocations. - `value` `(string: "")` - Specifies the value to compare the attribute against using the specified operation. This can be a literal value, another attribute, or any [Nomad interpolated - values](/nomad/docs/reference/runtime-variable-interpolation#interpreted_node_vars). + values](/nomad/docs/reference/runtime-variable-interpolation#interpreted_node_vars). 
The + value field is required except for when using the `is_set`, `is_not_set`, + `distinct_hosts`, or `distinct_property` operators. ### `operator` values From e062f87b07b5b55cb4291b52c671a08d93e4532a Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 28 Jul 2025 16:28:27 -0400 Subject: [PATCH 14/27] docs: fix typo in redirect URL domain (#26384) --- website/data/docs-nav-data.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index ee1b0f11b..affa461e0 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -671,19 +671,19 @@ "routes": [ { "title": "Exec2", - "href": "http://develoepr.hashicorp.com/nomad/plugins/drivers/exec2" + "href": "http://developer.hashicorp.com/nomad/plugins/drivers/exec2" }, { "title": "Podman", - "href": "http://develoepr.hashicorp.com/nomad/plugins/drivers/podman" + "href": "http://developer.hashicorp.com/nomad/plugins/drivers/podman" }, { "title": "Virt Beta", - "href": "http://develoepr.hashicorp.com/nomad/plugins/drivers/virt" + "href": "http://developer.hashicorp.com/nomad/plugins/drivers/virt" }, { "title": "Community", - "href": "http://develoepr.hashicorp.com/nomad/plugins/drivers/community" + "href": "http://developer.hashicorp.com/nomad/plugins/drivers/community" } ] } From 4ce937884dd73728c99da857121aab53a20bc153 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 29 Jul 2025 08:23:06 -0400 Subject: [PATCH 15/27] scheduler: move result mutation into `computeStop` (#26351) The `computeStop` method returns two values that only get used to mutate the result and the untainted set. Move the mutation into the method to match the work done in #26325. 
Ref: https://github.com/hashicorp/nomad/pull/26325 Ref: https://hashicorp.atlassian.net/browse/NMD-819 --- scheduler/reconciler/reconcile_cluster.go | 80 ++++++++++++----------- 1 file changed, 42 insertions(+), 38 deletions(-) diff --git a/scheduler/reconciler/reconcile_cluster.go b/scheduler/reconciler/reconcile_cluster.go index f870fdc5b..b437b3a3d 100644 --- a/scheduler/reconciler/reconcile_cluster.go +++ b/scheduler/reconciler/reconcile_cluster.go @@ -532,15 +532,11 @@ func (a *AllocReconciler) computeGroup(group string, all allocSet) (*ReconcileRe allocNameIndexForGroup := nameIndex result.TaskGroupAllocNameIndexes = map[string]*AllocNameIndex{group: allocNameIndexForGroup} - // Stop any unneeded allocations and update the untainted set to not - // include stopped allocations. + // Stop any unneeded allocations and update the untainted set to not include + // stopped allocations. isCanarying := dstate != nil && dstate.DesiredCanaries != 0 && !dstate.Promoted - - stop, stopAllocs := a.computeStop(tg, nameIndex, untainted, migrate, lost, canaries, isCanarying, lostLaterEvals) - result.Stop = append(result.Stop, stopAllocs...) - - result.DesiredTGUpdates[group].Stop += uint64(len(stop)) - untainted = untainted.difference(stop) + a.computeStop(tg, nameIndex, &untainted, migrate, lost, canaries, + isCanarying, lostLaterEvals, result) // Do inplace upgrades where possible and capture the set of upgrades that // need to be done destructively. 
@@ -592,7 +588,7 @@ func (a *AllocReconciler) computeGroup(group string, all allocSet) (*ReconcileRe result.DesiredTGUpdates[group].Ignore += uint64(len(destructive)) } - a.computeMigrations(result, migrate, tg, isCanarying) + a.computeMigrations(migrate, isCanarying, tg, result) result.Deployment = a.createDeployment( tg.Name, tg.Update, existingDeployment, dstate, all, destructive, int(result.DesiredTGUpdates[group].InPlaceUpdate)) @@ -999,8 +995,8 @@ func (a *AllocReconciler) computeDestructiveUpdates(destructive allocSet, underP // computeMigrations updates the result with the stops and placements required // for migration. -func (a *AllocReconciler) computeMigrations(result *ReconcileResults, migrate allocSet, - tg *structs.TaskGroup, isCanarying bool) { +func (a *AllocReconciler) computeMigrations(migrate allocSet, isCanarying bool, + tg *structs.TaskGroup, result *ReconcileResults) { result.DesiredTGUpdates[tg.Name].Migrate += uint64(len(migrate)) @@ -1083,28 +1079,38 @@ func (a *AllocReconciler) isDeploymentComplete(groupName string, destructive, in return complete } -// computeStop returns the set of allocations that are marked for stopping given -// the group definition, the set of allocations in various states and whether we -// are canarying. +// computeStop updates the result with the set of allocations we want to stop +// given the group definition, the set of allocations in various states and +// whether we are canarying. It mutates the untainted set with the remaining +// allocations. func (a *AllocReconciler) computeStop(group *structs.TaskGroup, nameIndex *AllocNameIndex, - untainted, migrate, lost, canaries allocSet, isCanarying bool, followupEvals map[string]string) (allocSet, []AllocStopResult) { + untainted *allocSet, migrate, lost, canaries allocSet, + isCanarying bool, followupEvals map[string]string, result *ReconcileResults) { - // Mark all lost allocations for stopAllocSet. 
- var stopAllocSet allocSet - stopAllocSet = stopAllocSet.union(lost) + // Mark all lost allocations for stop and copy the original untainted set as + // our working set (so that we only mutate the untainted set at the end) + var stop, working allocSet + stop = stop.union(lost) + working = working.union(*untainted) var stopAllocResult []AllocStopResult + defer func() { + result.Stop = append(result.Stop, stopAllocResult...) + result.DesiredTGUpdates[group.Name].Stop += uint64(len(stop)) + *untainted = untainted.difference(stop) + }() + delayedResult := markDelayed(lost, structs.AllocClientStatusLost, sstructs.StatusAllocLost, followupEvals) stopAllocResult = append(stopAllocResult, delayedResult...) // If we are still deploying or creating canaries, don't stop them if isCanarying { - untainted = untainted.difference(canaries) + working = working.difference(canaries) } // Remove disconnected allocations so they won't be stopped - knownUntainted := untainted.filterOutByClientStatus(structs.AllocClientStatusUnknown) + knownUntainted := working.filterOutByClientStatus(structs.AllocClientStatusUnknown) // Hot path the nothing to do case // @@ -1116,29 +1122,29 @@ func (a *AllocReconciler) computeStop(group *structs.TaskGroup, nameIndex *Alloc // corrected in `computePlacements` remove := len(knownUntainted) + len(migrate) - group.Count if remove <= 0 { - return stopAllocSet, stopAllocResult + return } // Filter out any terminal allocations from the untainted set // This is so that we don't try to mark them as stopped redundantly - untainted = untainted.filterByTerminal() + working = working.filterByTerminal() // Prefer stopping any alloc that has the same name as the canaries if we // are promoted if !isCanarying && len(canaries) != 0 { canaryNames := canaries.nameSet() - for id, alloc := range untainted.difference(canaries) { + for id, alloc := range working.difference(canaries) { if _, match := canaryNames[alloc.Name]; match { - stopAllocSet[id] = alloc + stop[id] = 
alloc stopAllocResult = append(stopAllocResult, AllocStopResult{ Alloc: alloc, StatusDescription: sstructs.StatusAllocNotNeeded, }) - delete(untainted, id) + delete(working, id) remove-- if remove == 0 { - return stopAllocSet, stopAllocResult + return } } } @@ -1157,51 +1163,49 @@ func (a *AllocReconciler) computeStop(group *structs.TaskGroup, nameIndex *Alloc StatusDescription: sstructs.StatusAllocNotNeeded, }) delete(migrate, id) - stopAllocSet[id] = alloc + stop[id] = alloc nameIndex.UnsetIndex(alloc.Index()) remove-- if remove == 0 { - return stopAllocSet, stopAllocResult + return } } } // Select the allocs with the highest count to remove removeNames := nameIndex.Highest(uint(remove)) - for id, alloc := range untainted { + for id, alloc := range working { if _, ok := removeNames[alloc.Name]; ok { - stopAllocSet[id] = alloc + stop[id] = alloc stopAllocResult = append(stopAllocResult, AllocStopResult{ Alloc: alloc, StatusDescription: sstructs.StatusAllocNotNeeded, }) - delete(untainted, id) + delete(working, id) remove-- if remove == 0 { - return stopAllocSet, stopAllocResult + return } } } // It is possible that we didn't stop as many as we should have if there // were allocations with duplicate names. 
- for id, alloc := range untainted { - stopAllocSet[id] = alloc + for id, alloc := range working { + stop[id] = alloc stopAllocResult = append(stopAllocResult, AllocStopResult{ Alloc: alloc, StatusDescription: sstructs.StatusAllocNotNeeded, }) - delete(untainted, id) + delete(working, id) remove-- if remove == 0 { - return stopAllocSet, stopAllocResult + return } } - - return stopAllocSet, stopAllocResult } // If there are allocations reconnecting we need to reconcile them and their From 1209c34be1b2e18b1c11d74c7035d17624916bc6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Jul 2025 16:21:18 -0400 Subject: [PATCH 16/27] chore(deps): bump github.com/docker/docker (#26390) Bumps [github.com/docker/docker](https://github.com/docker/docker) from 28.3.2+incompatible to 28.3.3+incompatible. - [Release notes](https://github.com/docker/docker/releases) - [Commits](https://github.com/docker/docker/compare/v28.3.2...v28.3.3) --- updated-dependencies: - dependency-name: github.com/docker/docker dependency-version: 28.3.3+incompatible dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4a4b752f0..00cd7db81 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/creack/pty v1.1.24 github.com/distribution/reference v0.6.0 github.com/docker/cli v28.3.2+incompatible - github.com/docker/docker v28.3.2+incompatible + github.com/docker/docker v28.3.3+incompatible github.com/docker/go-connections v0.5.0 github.com/docker/go-units v0.5.0 github.com/dustin/go-humanize v1.0.1 diff --git a/go.sum b/go.sum index f4b9d2393..d5fa946bf 100644 --- a/go.sum +++ b/go.sum @@ -866,8 +866,8 @@ github.com/docker/cli v28.3.2+incompatible h1:mOt9fcLE7zaACbxW1GeS65RI67wIJrTnqS github.com/docker/cli v28.3.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v28.3.2+incompatible h1:wn66NJ6pWB1vBZIilP8G3qQPqHy5XymfYn5vsqeA5oA= -github.com/docker/docker v28.3.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= +github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= From 5dc7e7fe2583030721dace2da6fbcdf64939d534 Mon Sep 17 00:00:00 2001 From: Aimee Ukasick Date: Wed, 30 Jul 2025 09:02:28 -0500 Subject: [PATCH 17/27] Docs: Chore: Ent labels 
(#26323) * replace outdated tutorial links * update more tutorial links * Add CE/ENT or ENT to left nav * remove ce/ent labels * revert enterprise features --- website/content/api-docs/acl/index.mdx | 2 +- website/content/api-docs/acl/policies.mdx | 2 +- website/content/api-docs/acl/tokens.mdx | 2 +- website/content/api-docs/events.mdx | 2 +- website/content/api-docs/index.mdx | 2 +- .../content/api-docs/operator/autopilot.mdx | 8 ++-- website/content/api-docs/search.mdx | 3 +- .../content/api-docs/sentinel-policies.mdx | 2 +- website/content/api-docs/ui.mdx | 4 +- website/content/commands/acl/index.mdx | 4 +- .../content/commands/operator/utilization.mdx | 4 +- .../docs/architecture/security/index.mdx | 6 +-- website/content/docs/concepts/job.mdx | 8 ++-- website/content/docs/concepts/plugins/cni.mdx | 2 +- .../docs/concepts/stateful-deployments.mdx | 2 +- website/content/docs/configuration/acl.mdx | 2 +- website/content/docs/configuration/audit.mdx | 3 +- .../content/docs/configuration/autopilot.mdx | 6 +-- website/content/docs/configuration/client.mdx | 3 -- .../content/docs/configuration/reporting.mdx | 3 +- .../content/docs/configuration/sentinel.mdx | 3 +- .../docs/deploy/clusters/federate-regions.mdx | 4 +- website/content/docs/govern/index.mdx | 2 +- website/content/docs/govern/namespaces.mdx | 2 +- website/content/docs/govern/sentinel.mdx | 4 +- .../content/docs/job-declare/multiregion.mdx | 25 ++++------- .../docs/job-declare/nomad-variables.mdx | 3 +- .../docs/job-specification/lifecycle.mdx | 2 +- .../docs/job-specification/multiregion.mdx | 2 +- .../content/docs/job-specification/update.mdx | 6 +-- website/content/docs/manage/autopilot.mdx | 11 ++--- .../content/docs/monitor/inspect-cluster.mdx | 2 +- .../docs/monitor/inspect-workloads.mdx | 2 +- .../docs/other-specifications/acl-policy.mdx | 2 +- .../docs/other-specifications/variables.mdx | 8 ++-- website/content/docs/quickstart.mdx | 3 -- .../docs/release-notes/nomad/v1-10-x.mdx | 2 +- 
.../docs/release-notes/nomad/v1_9_x.mdx | 4 +- .../docs/stateful-workloads/csi-volumes.mdx | 2 +- .../content/partials/task-driver-intro.mdx | 2 +- website/content/tools/autoscaling/agent.mdx | 9 ++-- .../agent/dynamic_application_sizing.mdx | 2 +- website/content/tools/autoscaling/cli.mdx | 7 ++- website/content/tools/autoscaling/index.mdx | 2 +- .../plugins/target/app-sizing-nomad.mdx | 10 ++--- website/content/tools/autoscaling/policy.mdx | 4 +- website/data/api-docs-nav-data.json | 25 +++++++++++ website/data/commands-nav-data.json | 5 +++ website/data/docs-nav-data.json | 45 +++++++++++++++++++ website/data/tools-nav-data.json | 25 +++++++++++ 50 files changed, 192 insertions(+), 103 deletions(-) diff --git a/website/content/api-docs/acl/index.mdx b/website/content/api-docs/acl/index.mdx index 318f42b70..86a7bea3f 100644 --- a/website/content/api-docs/acl/index.mdx +++ b/website/content/api-docs/acl/index.mdx @@ -10,6 +10,6 @@ description: |- The `/acl` endpoints provide access to the ACL subsystem which includes ACL bootstrapping, ACL Policies, ACL Roles, ACL Tokens, ACL Auth Methods, and ACL Binding Rules. For more details about ACLs, please see the [ACL -Guide](/nomad/tutorials/access-control). +Guide](/nomad/docs/secure/acl). Please choose a subsection in the navigation for more information. diff --git a/website/content/api-docs/acl/policies.mdx b/website/content/api-docs/acl/policies.mdx index 5788d6bed..97535fc33 100644 --- a/website/content/api-docs/acl/policies.mdx +++ b/website/content/api-docs/acl/policies.mdx @@ -7,7 +7,7 @@ description: The /acl/policy endpoints are used to configure and manage ACL poli # ACL Policies HTTP API The `/acl/policies` and `/acl/policy/` endpoints are used to manage ACL policies. -For more details about ACLs, please see the [ACL Guide](/nomad/tutorials/access-control). +For more details about ACLs, please see the [ACL Guide](/nomad/docs/secure/acl). 
## List Policies diff --git a/website/content/api-docs/acl/tokens.mdx b/website/content/api-docs/acl/tokens.mdx index 983ec3003..768a03ec2 100644 --- a/website/content/api-docs/acl/tokens.mdx +++ b/website/content/api-docs/acl/tokens.mdx @@ -7,7 +7,7 @@ description: The /acl/token/ endpoints are used to configure and manage ACL toke # ACL Tokens HTTP API The `/acl/bootstrap`, `/acl/tokens`, and `/acl/token/` endpoints are used to manage ACL tokens. -For more details about ACLs, please see the [ACL Guide](/nomad/tutorials/access-control). +For more details about ACLs, please see the [ACL Guide](/nomad/docs/secure/acl). ## Bootstrap Token diff --git a/website/content/api-docs/events.mdx b/website/content/api-docs/events.mdx index 57e44bdff..43fe25014 100644 --- a/website/content/api-docs/events.mdx +++ b/website/content/api-docs/events.mdx @@ -85,7 +85,7 @@ by default, requiring a management token. | Node | Node | | NodeDrain | Node | | NodePool | NodePool | -| Operator | UtilizationSnapshot (Enterprise only) | +| Operator | UtilizationSnapshot | | Service | Service Registrations | ### Event Types diff --git a/website/content/api-docs/index.mdx b/website/content/api-docs/index.mdx index 7e2f53fa2..88ee01852 100644 --- a/website/content/api-docs/index.mdx +++ b/website/content/api-docs/index.mdx @@ -68,7 +68,7 @@ administration. Several endpoints in Nomad use or require ACL tokens to operate. The token are used to authenticate the request and determine if the request is allowed based on the associated authorizations. Tokens are specified per-request by using the `X-Nomad-Token` request header or with the Bearer scheme in the authorization header set to the `SecretID` of an ACL Token. -For more details about ACLs, please see the [ACL Guide](/nomad/tutorials/access-control). +For more details about ACLs, please see the [ACL Guide](/nomad/docs/secure/acl). 
## Authentication diff --git a/website/content/api-docs/operator/autopilot.mdx b/website/content/api-docs/operator/autopilot.mdx index cfe2ea2a9..439355eff 100644 --- a/website/content/api-docs/operator/autopilot.mdx +++ b/website/content/api-docs/operator/autopilot.mdx @@ -106,15 +106,15 @@ The table below shows this endpoint's support for cluster. Only takes effect if all servers are running Raft protocol version 3 or higher. Must be a duration value such as `30s`. -- `EnableRedundancyZones` `(bool: false)` - (Enterprise-only) Specifies whether +- `EnableRedundancyZones` `(bool: false)` - Specifies whether to enable redundancy zones. -- `DisableUpgradeMigration` `(bool: false)` - (Enterprise-only) Disables Autopilot's +- `DisableUpgradeMigration` `(bool: false)` - Disables Autopilot's upgrade migration strategy in Nomad Enterprise of waiting until enough newer-versioned servers have been added to the cluster before promoting any of them to voters. -- `EnableCustomUpgrades` `(bool: false)` - (Enterprise-only) Specifies whether to +- `EnableCustomUpgrades` `(bool: false)` - Specifies whether to enable using custom upgrade versions when performing migrations. ## Read Health @@ -222,7 +222,7 @@ $ curl \ status of 200 will be returned. If `Healthy` is false, then a status of 429 will be returned. - This API endpoint return with more information in Nomad Enterprise. This is + This API endpoint returns with more information in Nomad Enterprise. This is not present in Nomad Community Edition. diff --git a/website/content/api-docs/search.mdx b/website/content/api-docs/search.mdx index 247ed3945..b205ee526 100644 --- a/website/content/api-docs/search.mdx +++ b/website/content/api-docs/search.mdx @@ -585,7 +585,8 @@ $ curl \ If the search Context is `all` when fuzzy searching, the object types that are identified only with UUIDs are also concurrently prefix-searched. Those types include -deployments, evals, volumes, and quotas (Enterprise). 
+deployments, evaluations, volumes, and resource quotas. Note that resource quotas is +an Enterprise feature. ### Sample Payload (prefix match) diff --git a/website/content/api-docs/sentinel-policies.mdx b/website/content/api-docs/sentinel-policies.mdx index efe2015f9..95a8d3780 100644 --- a/website/content/api-docs/sentinel-policies.mdx +++ b/website/content/api-docs/sentinel-policies.mdx @@ -11,7 +11,7 @@ description: >- The `/sentinel/policies` and `/sentinel/policy/` endpoints are used to manage Sentinel policies. For more details about Sentinel policies, please see the [Sentinel Policy Guide](/nomad/docs/govern/sentinel). -Sentinel endpoints are only available when ACLs are enabled. For more details about ACLs, please see the [ACL Guide](/nomad/tutorials/access-control). +Sentinel endpoints are only available when ACLs are enabled. For more details about ACLs, please see the [ACL Guide](/nomad/docs/secure/acl). diff --git a/website/content/api-docs/ui.mdx b/website/content/api-docs/ui.mdx index f8da33e0c..9a78bef50 100644 --- a/website/content/api-docs/ui.mdx +++ b/website/content/api-docs/ui.mdx @@ -495,9 +495,7 @@ This page lists all allocations for a CSI plugin. Each allocation includes the s - `type` `(string: "")` - Filters the list of allocations to only those with a matching plugin type. Value must be `controller`, `node`, or unset. -## Optimize - -~> **Enterprise Only!** This feature depends on functionality only present in Nomad Autoscaler Enterprise. +## Optimize This page lists all recommendations surfaced by [dynamic application sizing](/nomad/tools/autoscaling#dynamic-application-sizing). This page will automatically redirect to the recommendation summary route for the first recommendation sorted by relevance. 
diff --git a/website/content/commands/acl/index.mdx b/website/content/commands/acl/index.mdx index b313baace..6500a9a16 100644 --- a/website/content/commands/acl/index.mdx +++ b/website/content/commands/acl/index.mdx @@ -77,6 +77,6 @@ subcommands are available: [roledelete]: /nomad/commands/acl/role/delete [roleinfo]: /nomad/commands/acl/role/info [rolelist]: /nomad/commands/acl/role/list -[secure-guide]: /nomad/tutorials/access-control -[federated]: //nomad/docs/deploy/clusters/federate-regions +[secure-guide]: /nomad/docs/secure/acl +[federated]: /nomad/docs/deploy/clusters/federate-regions [`authoritative_region`]: /nomad/docs/configuration/server#authoritative_region diff --git a/website/content/commands/operator/utilization.mdx b/website/content/commands/operator/utilization.mdx index 0a0c8c1f9..796582e08 100644 --- a/website/content/commands/operator/utilization.mdx +++ b/website/content/commands/operator/utilization.mdx @@ -16,14 +16,12 @@ hours. If ACLs are enabled, this command requires a token with the `operator:write` capability. - + Refer to the [manual license utilization reporting](/nomad/docs/enterprise/license/utilization-reporting) page to learn more about reporting your Nomad Enterprise license utilization. - - ## Options - `-message` `(string; "")` - Provide context about the conditions under which diff --git a/website/content/docs/architecture/security/index.mdx b/website/content/docs/architecture/security/index.mdx index 6097b2812..14c751743 100644 --- a/website/content/docs/architecture/security/index.mdx +++ b/website/content/docs/architecture/security/index.mdx @@ -31,7 +31,7 @@ but the general mechanisms for a secure Nomad deployment revolve around: internal abuse by preventing unauthenticated access to network components within the cluster. -- **[ACLs](/nomad/tutorials/access-control)** Enables authorization for +- **[ACLs](/nomad/docs/secure/acl)** Enables authorization for authenticated connections by granting capabilities to ACL tokens. 
- **[Namespaces](/nomad/docs/govern/namespaces)** Access to read @@ -50,7 +50,7 @@ granularity may change depending on your team's use case where rigorous roles can be accurately defined and managed using the [Nomad backend secret engine for Vault](/vault/docs/secrets/nomad). This is described further with getting started steps using a development server -[here](/nomad/tutorials/access-control). +[here](/nomad/docs/secure/acl). It's important to note that there's no traditional concept of a user within Nomad itself. @@ -135,7 +135,7 @@ recommendations accordingly. when using `tls.verify_https_client=false`. You can use a reverse proxy or other external means to restrict access to them. -- [ACLs enabled](/nomad/tutorials/access-control) The +- [ACLs enabled](/nomad/docs/secure/acl) The access control list (ACL) system provides a capability-based control mechanism for Nomad administrators allowing for custom roles (typically within Vault) to be tied to an individual human or machine operator diff --git a/website/content/docs/concepts/job.mdx b/website/content/docs/concepts/job.mdx index d73bb8cd0..0ee602638 100644 --- a/website/content/docs/concepts/job.mdx +++ b/website/content/docs/concepts/job.mdx @@ -160,19 +160,21 @@ jobs: configuration and examples. - The [Schedulers] page explains the different types of Nomad schedulers and how those schedulers run jobs. +- The [Declare jobs][deploy-jobs] section contains guides for creating a job, + using task drivers, and deployment strategies. These tutorial resources contain examples of different kinds of jobs and how to configure them: - The [Quick Start][quickstart] tutorials cover deploying an application and its resources, which include parameterized and periodic jobs. -- The [Deploy and Manage Jobs][deploy-jobs] tutorial contains examples of different types of jobs. 
-- The [Create Nomad Job Specifications][job-spec-tutorial] collection explains several aspects of job creation, including parameterized jobs and deploying a Java app on Nomad. +- The [Create Nomad Job Specifications][job-spec-tutorial] collection contains + guides for migrating a Java application to Nomad. [allocations]: /nomad/docs/glossary#allocation [deployment]: /nomad/docs/glossary/#deployment -[deploy-jobs]: /nomad/tutorials/manage-jobs/ +[deploy-jobs]: /nomad/docs/job-declare [job-spec]: /nomad/docs/job-specification [job-spec-tutorial]: /nomad/tutorials/job-specifications [quickstart]: /nomad/tutorials/get-started/gs-deploy-job diff --git a/website/content/docs/concepts/plugins/cni.mdx b/website/content/docs/concepts/plugins/cni.mdx index 832bdc1ef..dfecdc1bc 100644 --- a/website/content/docs/concepts/plugins/cni.mdx +++ b/website/content/docs/concepts/plugins/cni.mdx @@ -64,4 +64,4 @@ Nomad Networking documentation](/nomad/docs/networking/cni). [cni_spec]: https://www.cni.dev/docs/spec/ [cni_spec_net_config]: https://github.com/containernetworking/cni/blob/main/SPEC.md#configuration-format [cni_spec_plugin_config]: https://github.com/containernetworking/cni/blob/main/SPEC.md#plugin-configuration-objects -[nomad_install]: /nomad/tutorials/get-started/get-started-install#linux-post-installation-steps +[nomad_install]: /nomad/docs/deploy#linux-post-installation-steps diff --git a/website/content/docs/concepts/stateful-deployments.mdx b/website/content/docs/concepts/stateful-deployments.mdx index 21d6d35bf..bff80bfd5 100644 --- a/website/content/docs/concepts/stateful-deployments.mdx +++ b/website/content/docs/concepts/stateful-deployments.mdx @@ -76,7 +76,7 @@ and volumes: - [Considerations for Stateful Workloads](/nomad/docs/architecture/storage/stateful-workloads) explores the options for persistent storage of workloads running in Nomad. - The [Nomad volume specification][volumes] defines the schema for creating and registering volumes. 
- The [job specification `volume` block](/nomad/docs/job-specification/volume) lets you configure a group that requires a specific volume from the cluster. -- The [Stateful Workloads](/nomad/tutorials/stateful-workloads) tutorials explore techniques to run jobs that require access to persistent storage. +- The [Stateful Workloads](/nomad/docs/stateful-workloads) guides explore techniques to run jobs that require access to persistent storage. [allocation]: /nomad/docs/glossary#allocation [delete]: /nomad/api-docs/volumes#delete-task-group-host-volume-claims diff --git a/website/content/docs/configuration/acl.mdx b/website/content/docs/configuration/acl.mdx index 6161d1d20..146a44c57 100644 --- a/website/content/docs/configuration/acl.mdx +++ b/website/content/docs/configuration/acl.mdx @@ -66,6 +66,6 @@ acl { TTL value for an ACL token when setting expiration. This is used by the Nomad servers to validate ACL tokens and ACL authentication methods. -[secure-guide]: /nomad/tutorials/access-control +[secure-guide]: /nomad/docs/secure/acl [authoritative-region]: /nomad/docs/configuration/server#authoritative_region [Configure for multiple regions]: /nomad/docs/secure/acl/bootstrap#configure-for-multiple-regions diff --git a/website/content/docs/configuration/audit.mdx b/website/content/docs/configuration/audit.mdx index 4ed021b96..418d5ee8f 100644 --- a/website/content/docs/configuration/audit.mdx +++ b/website/content/docs/configuration/audit.mdx @@ -8,13 +8,14 @@ description: >- # `audit` Block in Agent Configuration - This page provides reference information for configuring audit logging behavior in the `audit` block of a Nomad agent configuration. Enable audit logs, define a sink to stream audit logs to, and change filter rules to exclude events from the audit log. 
+ + ```hcl audit { enabled = true diff --git a/website/content/docs/configuration/autopilot.mdx b/website/content/docs/configuration/autopilot.mdx index 284e67c9e..18a90d3e1 100644 --- a/website/content/docs/configuration/autopilot.mdx +++ b/website/content/docs/configuration/autopilot.mdx @@ -46,17 +46,17 @@ autopilot { cluster. Only takes effect if all servers are running Raft protocol version 3 or higher. Must be a duration value such as `30s`. -- `enable_redundancy_zones` `(bool: false)` - Controls whether +- `enable_redundancy_zones` `(bool: false)` - Controls whether Autopilot separates servers into zones for redundancy, in conjunction with the [redundancy_zone](/nomad/docs/configuration/server#redundancy_zone) parameter. Only one server in each zone can be a voting member at one time. -- `disable_upgrade_migration` `(bool: false)` - Disables Autopilot's +- `disable_upgrade_migration` `(bool: false)` - Disables Autopilot's upgrade migration strategy in Nomad Enterprise of waiting until enough newer-versioned servers have been added to the cluster before promoting any of them to voters. -- `enable_custom_upgrades` `(bool: false)` - Specifies whether to +- `enable_custom_upgrades` `(bool: false)` - Specifies whether to enable using custom upgrade versions when performing migrations, in conjunction with the [upgrade_version](/nomad/docs/configuration/server#upgrade_version) parameter. diff --git a/website/content/docs/configuration/client.mdx b/website/content/docs/configuration/client.mdx index 388af74aa..6d2d4038f 100644 --- a/website/content/docs/configuration/client.mdx +++ b/website/content/docs/configuration/client.mdx @@ -283,9 +283,6 @@ Nomad never attempts to embed the `alloc_dir` in the chroot as doing so would ca ### `options` Parameters -~> Note: In Nomad 0.9 client configuration options for drivers were deprecated. -Refer to the [plugin block][plugin-block] documentation for more information. 
- The following is not an exhaustive list of options for only the Nomad client. To find the options supported by each individual Nomad driver, refer to the [drivers documentation](/nomad/docs/job-declare/task-driver). diff --git a/website/content/docs/configuration/reporting.mdx b/website/content/docs/configuration/reporting.mdx index 0a2a2bf60..b3d84fb87 100644 --- a/website/content/docs/configuration/reporting.mdx +++ b/website/content/docs/configuration/reporting.mdx @@ -8,11 +8,12 @@ description: >- # `reporting` Block in Agent Configuration - This page provides reference information for enabling automated license utilization reporting in the `reporting` block of a Nomad agent configuration. + + Configuration applies to agents running with [server mode enabled][server_mode_enabled]. diff --git a/website/content/docs/configuration/sentinel.mdx b/website/content/docs/configuration/sentinel.mdx index 8789e0867..a41b6ef14 100644 --- a/website/content/docs/configuration/sentinel.mdx +++ b/website/content/docs/configuration/sentinel.mdx @@ -8,13 +8,14 @@ description: >- # `sentinel` Block in Agent Configuration - This page provides reference information for configuring the Sentinel policy engine in the `sentinel` block of a Nomad agent configuration. Configure the path to the plugin that Nomad uses to import Sentinel policies and specify arguments to pass to that plugin on startup. 
+ + ```hcl sentinel { import "custom-plugin" { diff --git a/website/content/docs/deploy/clusters/federate-regions.mdx b/website/content/docs/deploy/clusters/federate-regions.mdx index 2c23a003d..2ae028306 100644 --- a/website/content/docs/deploy/clusters/federate-regions.mdx +++ b/website/content/docs/deploy/clusters/federate-regions.mdx @@ -137,12 +137,12 @@ Error querying jobs: Unexpected response code: 500 (No path to region) - [Deployment Topology across Multiple Regions][multi-region] -[multi-region]: /nomad/tutorials/enterprise/production-reference-architecture-vm-with-consul#deployment-topology-across-multiple-regions +[multi-region]: /nomad/docs/deploy/production/reference-architecture#deployment-topology-across-multiple-regions [multi-region-pic]: /img/clusters/nomad-multi-region.png [nomad-server-members]: /nomad/commands/server/members [nomad-status]: /nomad/commands/status [nomad-tf]: https://github.com/hashicorp/nomad/tree/master/terraform#provision-a-nomad-cluster-in-the-cloud [ports-used]: /nomad/docs/deploy/production/requirements#ports-used -[reference-arch]: /nomad/tutorials/enterprise/production-reference-architecture-vm-with-consul +[reference-arch]: /nomad/docs/deploy/production/reference-architecture [region-config]: /nomad/docs/configuration#region [server-join]: /nomad/commands/server/join diff --git a/website/content/docs/govern/index.mdx b/website/content/docs/govern/index.mdx index 78f9c1b32..076c490f6 100644 --- a/website/content/docs/govern/index.mdx +++ b/website/content/docs/govern/index.mdx @@ -109,5 +109,5 @@ to be local to each region for low latency. 
[img_sentinel_overview]: /img/govern/sentinel.jpg -[`sentinel-override` capability]: /nomad/tutorials/access-control#sentinel-override +[`sentinel-override` capability]: /nomad/docs/secure/acl#sentinel-override [`server` stanza]: /nomad/docs/configuration/server diff --git a/website/content/docs/govern/namespaces.mdx b/website/content/docs/govern/namespaces.mdx index a800e2145..b9df280cf 100644 --- a/website/content/docs/govern/namespaces.mdx +++ b/website/content/docs/govern/namespaces.mdx @@ -136,7 +136,7 @@ For specific details about working with namespaces, consult the [namespace commands] and [HTTP API] documentation. -[acls]: /nomad/tutorials/access-control +[acls]: /nomad/docs/secure/acl [http api]: /nomad/api-docs/namespaces [img_ui_ns_dropdown]: /img/govern/nomad-ui-namespace-dropdown.png [namespace commands]: /nomad/commands/namespace diff --git a/website/content/docs/govern/sentinel.mdx b/website/content/docs/govern/sentinel.mdx index 72fdf6620..909ec7216 100644 --- a/website/content/docs/govern/sentinel.mdx +++ b/website/content/docs/govern/sentinel.mdx @@ -197,9 +197,9 @@ For specific details about working with Sentinel, consult the [`nomad sentinel` and [HTTP API] documentation. 
[`nomad sentinel` sub-commands]: /nomad/commands/sentinel -[`sentinel-override` capability]: /nomad/tutorials/access-control#sentinel-override +[`sentinel-override` capability]: /nomad/docs/secure/acl#sentinel-override [`server` stanza]: /nomad/docs/configuration/server -[acls]: /nomad/tutorials/access-control +[acls]: /nomad/docs/secure/acl [http api]: /nomad/api-docs/sentinel-policies [json job specification]: /nomad/api-docs/json-jobs [nomad enterprise]: https://www.hashicorp.com/products/nomad/ diff --git a/website/content/docs/job-declare/multiregion.mdx b/website/content/docs/job-declare/multiregion.mdx index 84fd52a30..11dda700a 100644 --- a/website/content/docs/job-declare/multiregion.mdx +++ b/website/content/docs/job-declare/multiregion.mdx @@ -8,12 +8,13 @@ description: |- # Configure multi-region deployments -Federated Nomad clusters enable users to submit jobs targeting any region -from any server even if that server resides in a different region. As of Nomad 0.12 -Enterprise, you can also submit jobs that are deployed to multiple -regions. This tutorial demonstrates multi-region deployments, including +Federated Nomad clusters enable you to submit jobs targeting any region +from any server even if that server resides in a different region. You may submit jobs that are deployed to multiple +regions. This guide demonstrates multi-region deployments, including configurable rollout and rollback strategies. + + You can create a multi-region deployment job by adding a [`multiregion`] stanza to the job as shown below. @@ -38,16 +39,6 @@ multiregion { } ``` - - -The functionality described here is available only in [Nomad -Enterprise](https://www.hashicorp.com/products/nomad/pricing/) with the -Multi-Cluster & Efficiency module. To explore Nomad Enterprise features, you can -sign up for a free 30-day trial from -[here](https://www.hashicorp.com/products/nomad/trial). 
- - - ## Prerequisites To perform the tasks described in this guide, you need to have two Nomad @@ -446,9 +437,9 @@ west f08122e5 successful [nomad-tf]: https://github.com/hashicorp/nomad/tree/master/terraform [server-join]: /nomad/commands/server/join [ports-used]: /nomad/docs/deploy/production/requirements#ports-used -[reference-arch]: /nomad/tutorials/enterprise/production-reference-architecture-vm-with-consul +[reference-arch]: /nomad/docs/deploy/production/reference-architecture [nomad-server-members]: /nomad/commands/server/members -[acls-track]: /nomad/tutorials/access-control +[acls-track]: /nomad/docs/secure/acl [updates-track]: /nomad/docs/job-declare/strategy/ [update-auto-revert]: /nomad/docs/job-specification/update#auto_revert ["task states"]: /nomad/docs/job-specification/update#health_check @@ -456,4 +447,4 @@ west f08122e5 successful [alloc-health-api]: /nomad/api-docs/deployments#set-allocation-health-in-deployment [`update` stanza]: /nomad/docs/job-specification/update [`nomad deployment unblock`]: /nomad/commands/deployment/unblock -[multi-region-topology]: /nomad/tutorials/enterprise/production-reference-architecture-vm-with-consul#multi-region +[multi-region-topology]:/nomad/docs/deploy/production/reference-architecture#multi-region diff --git a/website/content/docs/job-declare/nomad-variables.mdx b/website/content/docs/job-declare/nomad-variables.mdx index a13c74a8c..63a23e3a4 100644 --- a/website/content/docs/job-declare/nomad-variables.mdx +++ b/website/content/docs/job-declare/nomad-variables.mdx @@ -468,8 +468,7 @@ to specify Nomad's behavior when a value changes. ## Next steps Because Nomad Variables use functions in the template block to emit data to -Nomad jobs, consider learning more about templates in Nomad with the [Templates -collection](/nomad/tutorials/templates). +Nomad jobs, consider learning more about templates in Nomad with [Nomad Pack](/nomad/tools/nomad-pack). 
[Nomad Variables]: /nomad/docs/concepts/variables [Nomad Variables Access Control]: /nomad/tutorials/variables/variables-acls diff --git a/website/content/docs/job-specification/lifecycle.mdx b/website/content/docs/job-specification/lifecycle.mdx index 81e9b30df..9828f9c1a 100644 --- a/website/content/docs/job-specification/lifecycle.mdx +++ b/website/content/docs/job-specification/lifecycle.mdx @@ -49,7 +49,7 @@ Learn more about [Nomad's task dependencies][learn-taskdeps]. lifecycle task is long-lived (`sidecar = true`) and terminates, it will be restarted as long as the allocation is running. -[learn-taskdeps]: /nomad/tutorials/task-deps +[learn-taskdeps]: /nomad/docs/job-declare/task-dependencies [shutdown_delay]: /nomad/docs/job-specification/group#shutdown_delay [leader]: /nomad/docs/job-specification/task#leader diff --git a/website/content/docs/job-specification/multiregion.mdx b/website/content/docs/job-specification/multiregion.mdx index 7c8255b23..9f81b9001 100644 --- a/website/content/docs/job-specification/multiregion.mdx +++ b/website/content/docs/job-specification/multiregion.mdx @@ -14,7 +14,7 @@ The `multiregion` block specifies that a job will be deployed to multiple one specified by the `region` field or the `-region` command line flag to `nomad job run`. - + Federated Nomad clusters are members of the same gossip cluster but not the same raft cluster; they don't share their data stores. 
Each region in a diff --git a/website/content/docs/job-specification/update.mdx b/website/content/docs/job-specification/update.mdx index 4e2b7fbf4..2f9ecc423 100644 --- a/website/content/docs/job-specification/update.mdx +++ b/website/content/docs/job-specification/update.mdx @@ -269,7 +269,7 @@ group "two" { } ``` -[canary]: /nomad/docs/job-declare/strategy/blue-green-canary 'Nomad Canary Deployments' +[canary]: /nomad/docs/job-declare/strategy/blue-green-canary [checks]: /nomad/docs/job-specification/service#check -[rolling]: /nomad/docs/job-declare/strategy/rolling 'Nomad Rolling Upgrades' -[strategies]: /nomad/tutorials/job-updates 'Nomad Update Strategies' +[rolling]: /nomad/docs/job-declare/strategy/rolling +[strategies]: /nomad/docs/job-declare/strategy diff --git a/website/content/docs/manage/autopilot.mdx b/website/content/docs/manage/autopilot.mdx index a9e781b41..e53461691 100644 --- a/website/content/docs/manage/autopilot.mdx +++ b/website/content/docs/manage/autopilot.mdx @@ -154,12 +154,7 @@ must be healthy and stable for a certain amount of time before being promoted to a full, voting member. This can be configured via the `ServerStabilizationTime` setting. ---- - -~> The following Autopilot features are available only in [Nomad Enterprise] -version 0.8.0 and later. - -## Server read and scheduling scaling +## Server read and scheduling scaling With the [`non_voting_server`] option, a server can be explicitly marked as a non-voter and will never be promoted to a voting member. This can be useful when @@ -168,7 +163,7 @@ have data replicated to it, but it will not be part of the quorum that the leader must wait for before committing log entries. Non voting servers can also act as scheduling workers to increase scheduling throughput in large clusters. 
-## Redundancy zones +## Redundancy zones Prior to Autopilot, it was difficult to deploy servers in a way that took advantage of isolated failure domains such as AWS Availability Zones; users @@ -196,7 +191,7 @@ Nomad will then use these values to partition the servers by redundancy zone, and will aim to keep one voting server per zone. Extra servers in each zone will stay as non-voters on standby to be promoted if the active voter leaves or dies. -## Upgrade migrations +## Upgrade migrations Autopilot in Nomad Enterprise supports upgrade migrations by default. To disable this functionality, set `DisableUpgradeMigration` to true. diff --git a/website/content/docs/monitor/inspect-cluster.mdx b/website/content/docs/monitor/inspect-cluster.mdx index 61d307c51..3b45c0815 100644 --- a/website/content/docs/monitor/inspect-cluster.mdx +++ b/website/content/docs/monitor/inspect-cluster.mdx @@ -168,5 +168,5 @@ using the Nomad UI. [img-servers-list]: /img/monitor/guide-ui-img-servers-list.png [metadata]: /nomad/docs/configuration/client#meta [performed from the cli]: /nomad/docs/manage/migrate-workloads -[securing the web ui with acls]: /nomad/tutorials/access-control +[securing the web ui with acls]: /nomad/docs/secure/acl [typically three or five]: /nomad/docs/architecture/cluster/consensus#deployment-table diff --git a/website/content/docs/monitor/inspect-workloads.mdx b/website/content/docs/monitor/inspect-workloads.mdx index 81408120a..7f1388e31 100644 --- a/website/content/docs/monitor/inspect-workloads.mdx +++ b/website/content/docs/monitor/inspect-workloads.mdx @@ -218,4 +218,4 @@ the Nomad UI, learn how to inspect the state of your cluster using the Nomad UI. 
[in the job definition to reschedule]: /nomad/docs/job-specification/reschedule [local restart attempts]: /nomad/docs/job-specification/restart [periodic force]: /nomad/commands/job/periodic-force -[securing the web ui with acls]: /nomad/tutorials/access-control +[securing the web ui with acls]: /nomad/docs/secure/acl diff --git a/website/content/docs/other-specifications/acl-policy.mdx b/website/content/docs/other-specifications/acl-policy.mdx index 0c51850b0..4625cc345 100644 --- a/website/content/docs/other-specifications/acl-policy.mdx +++ b/website/content/docs/other-specifications/acl-policy.mdx @@ -479,7 +479,7 @@ plugin { } ``` -[Secure Nomad with Access Control]: /nomad/tutorials/access-control +[Secure Nomad with Access Control]: /nomad/docs/secure/acl [hcl]: https://github.com/hashicorp/hcl [hcl_syntax_spec]: https://github.com/hashicorp/hcl/blob/main/hclsyntax/spec.md [api_jobs]: /nomad/api-docs/jobs diff --git a/website/content/docs/other-specifications/variables.mdx b/website/content/docs/other-specifications/variables.mdx index aa0b3cab3..7d3e5853d 100644 --- a/website/content/docs/other-specifications/variables.mdx +++ b/website/content/docs/other-specifications/variables.mdx @@ -64,12 +64,14 @@ details on `path` and `items` name restrictions. ## Resources -Visit the [Nomad Variables tutorial][tutorial] to learn how to create variables, -configure access control for variables, and access variables from within job tasks. 
+- [Create and update Nomad variables](/nomad/tutorials/variables/variables-create) +- [Configure access control for Nomad + variables](/nomad/tutorials/variables/variables-acls) +- [Use Nomad variables in tasks](/nomad/docs/job-declare/nomad-variables) [nv]: /nomad/docs/concepts/variables [var-init]: /nomad/commands/var/init [var-put]: /nomad/commands/var/put [jobspecs]: /nomad/docs/job-specification [var-restrict]: /nomad/commands/var/put#restrictions -[tutorial]: /nomad/tutorials/variables + diff --git a/website/content/docs/quickstart.mdx b/website/content/docs/quickstart.mdx index 8aac8a197..bccbef1d3 100644 --- a/website/content/docs/quickstart.mdx +++ b/website/content/docs/quickstart.mdx @@ -24,8 +24,6 @@ We recommend these tutorials, which provision a Nomad cluster for you. - [Cluster Setup][]: Provision a Nomad cluster on AWS, Azure, or GCP. Enable Consul and access control lists (ACLs). This tutorial series has an associated code repository so you can review the Terraform provisioning scripts. -- [Enable gossip encryption for Nomad][interactive]: Launch an interactive lab - to use Nomad in your browser. 
## Cloud installation @@ -73,7 +71,6 @@ Use one of the following methods to run a local Nomad sandbox environment: [installing-binary]: /nomad/docs/deploy [Get Started]: /nomad/tutorials/get-started [Cluster Setup]: /nomad/tutorials/cluster-setup -[interactive]: /nomad/tutorials/interactive/security-gossip-encryption [Provision a Nomad cluster on AWS]: https://github.com/hashicorp/nomad/blob/main/terraform/aws/README.md [Provision a Nomad cluster on Azure]: https://github.com/hashicorp/nomad/blob/main/terraform/azure/README.md diff --git a/website/content/docs/release-notes/nomad/v1-10-x.mdx b/website/content/docs/release-notes/nomad/v1-10-x.mdx index 5fdb70806..587a4aa77 100644 --- a/website/content/docs/release-notes/nomad/v1-10-x.mdx +++ b/website/content/docs/release-notes/nomad/v1-10-x.mdx @@ -217,4 +217,4 @@ These links take you to the changelogs on the GitHub website. [pkce]: https://oauth.net/2/pkce/ [oidc-concepts]: /nomad/docs/secure/authentication/oidc#client-assertions [oidc-trouble]: /nomad/docs/secure/authentication/oidc#oidc-configuration-troubleshooting -[oidc-tutorial]: /nomad/tutorials/single-sign-on/sso-oidc-keycloak +[oidc-tutorial]: /nomad/docs/secure/authentication/sso-pkce-jwt diff --git a/website/content/docs/release-notes/nomad/v1_9_x.mdx b/website/content/docs/release-notes/nomad/v1_9_x.mdx index fa39a1663..5dd37782b 100644 --- a/website/content/docs/release-notes/nomad/v1_9_x.mdx +++ b/website/content/docs/release-notes/nomad/v1_9_x.mdx @@ -17,11 +17,11 @@ make full use of your GPU investment. The device driver automatically detects MIGs. Refer to the [NVIDIA driver docs](/nomad/plugins/devices/nvidia) for details. -- **Quotas for device resources (Enterprise)**: This release extends quotas to +- **Quotas for device resources**: This release extends quotas to allow limiting [device resources](/nomad/docs/v1.9.x/job-specification/device). Refer to [Resource quotas](/nomad/docs/v1.9.x/other-specifications/quota) for configuration details. 
-- **NUMA awareness for device resources (Enterprise)**: Nomad is able to +- **NUMA awareness for device resources**: Nomad is able to correlate CPU cores with memory nodes and assign tasks to run on specific CPU cores so as to minimize any cross-memory node access patterns. With Nomad 1.9, we are expanding this functionality to also correlate diff --git a/website/content/docs/stateful-workloads/csi-volumes.mdx b/website/content/docs/stateful-workloads/csi-volumes.mdx index 5f07c27b3..20896686d 100644 --- a/website/content/docs/stateful-workloads/csi-volumes.mdx +++ b/website/content/docs/stateful-workloads/csi-volumes.mdx @@ -658,4 +658,4 @@ job’s lifecycle. [k8s-drivers]: https://kubernetes-csi.github.io/docs/drivers.html [nomad-tf]: https://github.com/hashicorp/nomad/tree/master/terraform#provision-a-nomad-cluster-in-the-cloud [password-security]: https://dev.mysql.com/doc/refman/8.0/en/password-security.html -[reference-arch]: /nomad/tutorials/enterprise/production-reference-architecture-vm-with-consul#high-availability +[reference-arch]: /nomad/docs/deploy/production/reference-architecture#high-availability diff --git a/website/content/partials/task-driver-intro.mdx b/website/content/partials/task-driver-intro.mdx index bec6fb920..a79e1ad08 100644 --- a/website/content/partials/task-driver-intro.mdx +++ b/website/content/partials/task-driver-intro.mdx @@ -9,6 +9,6 @@ client CPU, memory, and storage between tasks. Resource isolation effectiveness depends upon individual task driver implementations and underlying client operating systems. Task drivers include various security-related controls but do not use the Nomad client-to-task interface as a security boundary. Refer to the -[access control guide](/nomad/tutorials/access-control) for more information on +[access control guide](/nomad/docs/secure/acl) for more information on how to protect Nomad cluster operations. 
diff --git a/website/content/tools/autoscaling/agent.mdx b/website/content/tools/autoscaling/agent.mdx index 16bbbdfa0..2ca63b765 100644 --- a/website/content/tools/autoscaling/agent.mdx +++ b/website/content/tools/autoscaling/agent.mdx @@ -51,8 +51,9 @@ namespace "default" { } ``` -If running Nomad Autoscaler Enterprise, the following ACL policy addition is -needed to ensure it can read the Nomad Enterprise license. + + +If you are running Nomad Autoscaler Enterprise, add this ACL policy to ensure Nomad Autoscaler Enterprise is able to read the Nomad Enterprise license. ```hcl operator { @@ -60,6 +61,8 @@ operator { } ``` + + Other APM and target plugins may require additional ACLs; see the plugin documentation for more information. ## Load Order and Merging @@ -120,7 +123,7 @@ following actions. [hcl_v2]: https://github.com/hashicorp/hcl/tree/hcl2 [nomad_namespaces]: /nomad/docs/govern/namespaces -[nomad_acls]: /nomad/tutorials/access-control +[nomad_acls]: /nomad/docs/secure/acl [autoscaler_agent_nomad]: /nomad/tools/autoscaling/agent/nomad [autoscaler_cli_config]: /nomad/tools/autoscaling/cli#config [autoscaler_cli_policy_dir]: /nomad/tools/autoscaling/cli#policy-dir diff --git a/website/content/tools/autoscaling/agent/dynamic_application_sizing.mdx b/website/content/tools/autoscaling/agent/dynamic_application_sizing.mdx index 3acc904e9..04f9f34b0 100644 --- a/website/content/tools/autoscaling/agent/dynamic_application_sizing.mdx +++ b/website/content/tools/autoscaling/agent/dynamic_application_sizing.mdx @@ -12,7 +12,7 @@ description: >- This functionality only exists in Nomad Autoscaler Enterprise. This is not - present in the open source version of Nomad Autoscaler. + present in the Community Edition of Nomad Autoscaler. 
~> Note that currently Prometheus is the only APM available for Dynamic Application Sizing diff --git a/website/content/tools/autoscaling/cli.mdx b/website/content/tools/autoscaling/cli.mdx index b5988c78a..7dfc60b40 100644 --- a/website/content/tools/autoscaling/cli.mdx +++ b/website/content/tools/autoscaling/cli.mdx @@ -94,8 +94,11 @@ passed in via CLI arguments. The `agent` command accepts the following arguments - `-policy-eval-workers=`: The number of workers to initialize for each queue, formatted as `:,:`. Nomad Autoscaler supports - `cluster` and `horizontal` queues. Nomad Autoscaler Enterprise supports additional - `vertical_mem` and `vertical_cpu` queues. + the following queues: + - `cluster` + - `horizontal` + - `vertical_mem` + - `vertical_cpu` - `-policy-source-disable-file`: Disable the sourcing of policies from disk. diff --git a/website/content/tools/autoscaling/index.mdx b/website/content/tools/autoscaling/index.mdx index ebf49fbf3..d2abb89c9 100644 --- a/website/content/tools/autoscaling/index.mdx +++ b/website/content/tools/autoscaling/index.mdx @@ -46,7 +46,7 @@ cluster. This functionality only exists in Nomad Autoscaler Enterprise. This is not - present in the open source version of Nomad Autoscaler. + present in the Community Edition of Nomad Autoscaler. Dynamic Application Sizing enables organizations to optimize the resource diff --git a/website/content/tools/autoscaling/plugins/target/app-sizing-nomad.mdx b/website/content/tools/autoscaling/plugins/target/app-sizing-nomad.mdx index d293ba125..4cdbf110b 100644 --- a/website/content/tools/autoscaling/plugins/target/app-sizing-nomad.mdx +++ b/website/content/tools/autoscaling/plugins/target/app-sizing-nomad.mdx @@ -6,16 +6,16 @@ description: The "app-sizing-nomad" target plugin scales a task resource. 
# Dynamic Application Sizing Nomad Task Target +The `app-sizing-nomad` target plugin reports on the current task resource value +as well as submits recommendations to Nomad via the [recommendations API endpoint][nomad_recommendations_api] +based on the result of the Dynamic Application Sizing strategy calculations. + This functionality only exists in Nomad Autoscaler Enterprise. This is not present in the open source version of Nomad Autoscaler. -~> Note that currently Prometheus is the only APM available for Dynamic Application Sizing - -The `app-sizing-nomad` target plugin reports on the current task resource value -as well as submits recommendations to Nomad via the [recommendations API endpoint][nomad_recommendations_api] -based on the result of the Dynamic Application Sizing strategy calculations. +Note that currently Prometheus is the only APM available for dynamic application sizing. ## Agent Configuration Options diff --git a/website/content/tools/autoscaling/policy.mdx b/website/content/tools/autoscaling/policy.mdx index 77fab0bae..714d0bb87 100644 --- a/website/content/tools/autoscaling/policy.mdx +++ b/website/content/tools/autoscaling/policy.mdx @@ -183,11 +183,11 @@ scaling "azure_cluster_policy" { } ``` -## Task (DAS) `policy` Options +## Task `policy` options for dynamic application sizing This functionality only exists in Nomad Autoscaler Enterprise. This is not - present in the open source version of Nomad Autoscaler. + present in the Community Edition of Nomad Autoscaler. 
The following options are available when using the Nomad Autoscaler Enterprise diff --git a/website/data/api-docs-nav-data.json b/website/data/api-docs-nav-data.json index 689540408..5b78b65f4 100644 --- a/website/data/api-docs-nav-data.json +++ b/website/data/api-docs-nav-data.json @@ -119,6 +119,11 @@ }, { "title": "License", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "operator/license" }, { @@ -139,6 +144,11 @@ }, { "title": "Utilization Reporting", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "operator/utilization" } ] @@ -149,10 +159,20 @@ }, { "title": "Quotas", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "quotas" }, { "title": "Recommendations", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "recommendations" }, { @@ -169,6 +189,11 @@ }, { "title": "Sentinel Policies", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "sentinel-policies" }, { diff --git a/website/data/commands-nav-data.json b/website/data/commands-nav-data.json index 3d3d210bc..172f0503d 100644 --- a/website/data/commands-nav-data.json +++ b/website/data/commands-nav-data.json @@ -682,6 +682,11 @@ }, { "title": "utilization", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "operator/utilization" } ] diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index affa461e0..70fc0d285 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -720,6 +720,11 @@ }, { "title": "Configure multi-region deployments", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "job-declare/multiregion" }, { @@ -920,6 +925,11 @@ }, { "title": "audit", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "configuration/audit" }, { @@ -965,6 +975,11 @@ }, { 
"title": "reporting", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "configuration/reporting" }, { @@ -977,6 +992,11 @@ }, { "title": "sentinel", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "configuration/sentinel" }, { @@ -1110,6 +1130,11 @@ }, { "title": "multiregion", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "job-specification/multiregion" }, { @@ -1118,6 +1143,11 @@ }, { "title": "numa", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "job-specification/numa" }, { @@ -1150,6 +1180,11 @@ }, { "title": "schedule", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "job-specification/schedule" }, { @@ -1227,6 +1262,11 @@ }, { "title": "Resource quota", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "other-specifications/quota" }, { @@ -1281,6 +1321,11 @@ }, { "title": "Sentinel policy", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "reference/sentinel-policy" }, { diff --git a/website/data/tools-nav-data.json b/website/data/tools-nav-data.json index 6dc6d6569..fcce32c78 100644 --- a/website/data/tools-nav-data.json +++ b/website/data/tools-nav-data.json @@ -74,6 +74,11 @@ }, { "title": "dynamic_application_sizing", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "autoscaling/agent/dynamic_application_sizing" }, { @@ -163,14 +168,29 @@ }, { "title": "Dynamic Application Sizing Average", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "autoscaling/plugins/strategy/app-sizing-avg" }, { "title": "Dynamic Application Sizing Max", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "autoscaling/plugins/strategy/app-sizing-max" }, { "title": "Dynamic Application Sizing Percentile", + 
"badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "autoscaling/plugins/strategy/app-sizing-percentile" }, { @@ -208,6 +228,11 @@ }, { "title": "Dynamic Application Sizing", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, "path": "autoscaling/plugins/target/app-sizing-nomad" }, { From 6f81222ec8a99428468930f88288bfa209af2578 Mon Sep 17 00:00:00 2001 From: Gautam Kumar <54418242+Gautam3994@users.noreply.github.com> Date: Fri, 1 Aug 2025 18:32:47 +0530 Subject: [PATCH 18/27] CL: improve `acl policy self` output for management tokens (#26396) Improved the acl policy self CLI command to handle both management and client tokens. Management tokens now display a clear message indicating global access with no individual policies. Fixes: https://github.com/hashicorp/nomad/issues/26389 --- .changelog/26396.txt | 3 ++ command/acl_policy_self.go | 12 +++++ command/acl_policy_self_test.go | 96 ++++++++++++++++----------------- 3 files changed, 63 insertions(+), 48 deletions(-) create mode 100644 .changelog/26396.txt diff --git a/.changelog/26396.txt b/.changelog/26396.txt new file mode 100644 index 000000000..c8e60c7a1 --- /dev/null +++ b/.changelog/26396.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fixed a bug where `acl policy self` command would output all policies when used with a management token +``` diff --git a/command/acl_policy_self.go b/command/acl_policy_self.go index d39b22a60..d5f1528de 100644 --- a/command/acl_policy_self.go +++ b/command/acl_policy_self.go @@ -81,6 +81,18 @@ func (c *ACLPolicySelfCommand) Run(args []string) int { return 1 } + // Read the self token to check its type + token, _, err := client.ACLTokens().Self(nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error fetching token: %s", err)) + return 1 + } + + if token.Type == "management" { + c.Ui.Output("This is a management token. 
No individual policies are assigned.") + return 0 + } + policies, _, err := client.ACLPolicies().Self(nil) if err != nil { c.Ui.Error(fmt.Sprintf("Error fetching WI policies: %s", err)) diff --git a/command/acl_policy_self_test.go b/command/acl_policy_self_test.go index 21fd17e7c..d9835cdac 100644 --- a/command/acl_policy_self_test.go +++ b/command/acl_policy_self_test.go @@ -10,70 +10,70 @@ import ( "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/testutil" "github.com/shoenig/test/must" ) func TestACLPolicySelfCommand_ViaEnvVar(t *testing.T) { + const policyName = "nw" + config := func(c *agent.Config) { c.ACL.Enabled = true } - srv, _, url := testServer(t, true, config) - defer srv.Shutdown() + t.Cleanup(srv.Shutdown) - state := srv.Agent.Server().State() - - // Bootstrap an initial ACL token - token := srv.RootToken - must.NotNil(t, token) - - // Create a minimal job - job := mock.MinJob() - - // Add a job policy - polArgs := structs.ACLPolicyUpsertRequest{ - Policies: []*structs.ACLPolicy{ - { - Name: "nw", - Description: "test job can write to nodes", - Rules: `node { policy = "write" }`, - JobACL: &structs.JobACL{ - Namespace: job.Namespace, - JobID: job.ID, + createPolicy := func(t *testing.T, srv *agent.TestAgent, token *structs.ACLToken, job *structs.Job) { + args := structs.ACLPolicyUpsertRequest{ + Policies: []*structs.ACLPolicy{ + { + Name: policyName, + Description: "test job can write to nodes", + Rules: `node { policy = "write" }`, + JobACL: &structs.JobACL{ + Namespace: job.Namespace, + JobID: job.ID, + }, }, }, - }, - WriteRequest: structs.WriteRequest{ - Region: job.Region, - AuthToken: token.SecretID, - Namespace: job.Namespace, - }, + WriteRequest: structs.WriteRequest{ + Region: job.Region, + AuthToken: token.SecretID, + Namespace: job.Namespace, + }, + } + reply := structs.GenericResponse{} + must.NoError(t, srv.RPC("ACL.UpsertPolicies", 
&args, &reply)) } - polReply := structs.GenericResponse{} - must.NoError(t, srv.RPC("ACL.UpsertPolicies", &polArgs, &polReply)) - must.NonZero(t, polReply.WriteMeta.Index) - ui := cli.NewMockUi() - cmd := &ACLPolicySelfCommand{Meta: Meta{Ui: ui, flagAddress: url}} + runCommand := func(t *testing.T, url, token string) string { + ui := cli.NewMockUi() + cmd := &ACLPolicySelfCommand{Meta: Meta{Ui: ui, flagAddress: url}} + t.Setenv("NOMAD_TOKEN", token) + must.Zero(t, cmd.Run([]string{"-address=" + url})) + return ui.OutputWriter.String() + } - allocs := testutil.WaitForRunningWithToken(t, srv.RPC, job, token.SecretID) - must.Len(t, 1, allocs) + rootToken := srv.RootToken - alloc, err := state.AllocByID(nil, allocs[0].ID) - must.NoError(t, err) - must.MapContainsKey(t, alloc.SignedIdentities, "t") - wid := alloc.SignedIdentities["t"] + t.Run("SelfPolicy returns correct output for management token", func(t *testing.T) { + createPolicy(t, srv, rootToken, mock.MinJob()) - // Fetch info on policies with a JWT - t.Setenv("NOMAD_TOKEN", wid) - code := cmd.Run([]string{"-address=" + url}) - must.Zero(t, code) + out := runCommand(t, url, rootToken.SecretID) + must.StrContains(t, out, "This is a management token. 
No individual policies are assigned.") + }) - // Check the output - out := ui.OutputWriter.String() - must.StrContains(t, out, polArgs.Policies[0].Name) + t.Run("SelfPolicy returns correct output for client token", func(t *testing.T) { + job := mock.MinJob() + createPolicy(t, srv, rootToken, job) - // make sure we put the job ACLs in there, too - must.StrContains(t, out, polArgs.Policies[0].JobACL.JobID) + clientToken := mock.ACLToken() + clientToken.Policies = []string{policyName} + must.NoError(t, srv.Agent.Server().State().UpsertACLTokens( + structs.MsgTypeTestSetup, 1, []*structs.ACLToken{clientToken}, + )) + + out := runCommand(t, url, clientToken.SecretID) + must.StrContains(t, out, policyName) + must.StrContains(t, out, job.ID) + }) } From d709accaf5a1fec85f2dfef27b51256a723d9430 Mon Sep 17 00:00:00 2001 From: tehut Date: Fri, 1 Aug 2025 10:26:59 -0700 Subject: [PATCH 19/27] Add nomad monitor export command (#26178) * Add MonitorExport command and handlers * Implement autocomplete * Require nomad in serviceName * Fix race in StreamReader.Read * Add and use framer.Flush() to coordinate function exit * Add LogFile to client/Server config and read NomadLogPath in rpcHandler instead of HTTPServer * Parameterize StreamFixed stream size --- .changelog/26178.txt | 3 + api/agent.go | 14 +- api/fs.go | 15 +- client/agent_endpoint.go | 156 ++++++---- client/agent_endpoint_test.go | 81 ++++++ client/config/config.go | 3 + client/lib/streamframer/framer.go | 26 +- client/structs/structs.go | 31 ++ command/agent/agent.go | 5 +- command/agent/agent_endpoint.go | 119 +++++++- command/agent/agent_endpoint_test.go | 199 +++++++++++++ command/agent/config.go | 6 + command/agent/http.go | 1 + command/agent/monitor/export_monitor.go | 275 ++++++++++++++++++ command/agent/monitor/monitor_test.go | 105 ++++++- command/agent/monitor/stream_helpers.go | 250 ++++++++++++++++ command/agent/monitor/stream_helpers_test.go | 250 ++++++++++++++++ command/agent/monitor/test_helpers.go | 
99 +++++++ command/agent_monitor.go | 26 +- command/agent_monitor_export.go | 209 +++++++++++++ command/agent_monitor_export_test.go | 93 ++++++ command/alloc_fs.go | 33 +-- command/commands.go | 5 + command/helpers.go | 34 +++ command/helpers_test.go | 123 ++++++++ nomad/client_agent_endpoint.go | 196 +++++++++---- nomad/client_agent_endpoint_test.go | 85 ++++++ nomad/config.go | 3 + website/content/commands/monitor/export.mdx | 79 +++++ .../{monitor.mdx => monitor/index.mdx} | 0 website/data/commands-nav-data.json | 10 +- 31 files changed, 2354 insertions(+), 180 deletions(-) create mode 100644 .changelog/26178.txt create mode 100644 command/agent/monitor/export_monitor.go create mode 100644 command/agent/monitor/stream_helpers.go create mode 100644 command/agent/monitor/stream_helpers_test.go create mode 100644 command/agent/monitor/test_helpers.go create mode 100644 command/agent_monitor_export.go create mode 100644 command/agent_monitor_export_test.go create mode 100644 website/content/commands/monitor/export.mdx rename website/content/commands/{monitor.mdx => monitor/index.mdx} (100%) diff --git a/.changelog/26178.txt b/.changelog/26178.txt new file mode 100644 index 000000000..a3674d07c --- /dev/null +++ b/.changelog/26178.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Added monitor export cli command to retrieve journald logs or the contents of the Nomad log file for a given Nomad agent +``` diff --git a/api/agent.go b/api/agent.go index 295d159a0..ee8ff65fd 100644 --- a/api/agent.go +++ b/api/agent.go @@ -302,8 +302,20 @@ func (a *Agent) Host(serverID, nodeID string, q *QueryOptions) (*HostDataRespons // Monitor returns a channel which will receive streaming logs from the agent // Providing a non-nil stopCh can be used to close the connection and stop log streaming func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) { + frames, errCh := a.monitorHelper(stopCh, q, "/v1/agent/monitor") + return frames, 
errCh +} + +// MonitorExport returns a channel which will receive streaming logs from the agent +// Providing a non-nil stopCh can be used to close the connection and stop log streaming +func (a *Agent) MonitorExport(stopCh <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) { + frames, errCh := a.monitorHelper(stopCh, q, "/v1/agent/monitor/export") + return frames, errCh +} + +func (a *Agent) monitorHelper(stopCh <-chan struct{}, q *QueryOptions, path string) (chan *StreamFrame, chan error) { errCh := make(chan error, 1) - r, err := a.client.newRequest("GET", "/v1/agent/monitor") + r, err := a.client.newRequest("GET", path) if err != nil { errCh <- err return nil, errCh diff --git a/api/fs.go b/api/fs.go index f6b831c30..8e65f60ab 100644 --- a/api/fs.go +++ b/api/fs.go @@ -389,12 +389,23 @@ func (f *FrameReader) Read(p []byte) (n int, err error) { case <-unblock: return 0, nil case err := <-f.errCh: - return 0, err + // check for race with f.frames before returning error + select { + case frame, ok := <-f.frames: + if !ok { + return 0, io.EOF + } + f.frame = frame + + // Store the total offset into the file + f.byteOffset = int(f.frame.Offset) + default: + return 0, err + } case <-f.cancelCh: return 0, io.EOF } } - // Copy the data out of the frame and update our offset n = copy(p, f.frame.Data[f.frameOffset:]) f.frameOffset += n diff --git a/client/agent_endpoint.go b/client/agent_endpoint.go index a47dcde05..cb43ac3ae 100644 --- a/client/agent_endpoint.go +++ b/client/agent_endpoint.go @@ -10,19 +10,16 @@ import ( "io" "time" + log "github.com/hashicorp/go-hclog" + metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-msgpack/v2/codec" - + sframer "github.com/hashicorp/nomad/client/lib/streamframer" + cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/command/agent/host" "github.com/hashicorp/nomad/command/agent/monitor" "github.com/hashicorp/nomad/command/agent/pprof" 
"github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" - - log "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" - - sframer "github.com/hashicorp/nomad/client/lib/streamframer" - cstructs "github.com/hashicorp/nomad/client/structs" ) type Agent struct { @@ -32,6 +29,7 @@ type Agent struct { func NewAgentEndpoint(c *Client) *Agent { a := &Agent{c: c} a.c.streamingRpcs.Register("Agent.Monitor", a.monitor) + a.c.streamingRpcs.Register("Agent.MonitorExport", a.monitorExport) return a } @@ -84,7 +82,6 @@ func (a *Agent) Profile(args *structs.AgentPprofRequest, reply *structs.AgentPpr func (a *Agent) monitor(conn io.ReadWriteCloser) { defer metrics.MeasureSince([]string{"client", "agent", "monitor"}, time.Now()) defer conn.Close() - // Decode arguments var args cstructs.MonitorRequest decoder := codec.NewDecoder(conn, structs.MsgpackHandle) @@ -117,7 +114,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - monitor := monitor.New(512, a.c.logger, &log.LoggerOptions{ + m := monitor.New(512, a.c.logger, &log.LoggerOptions{ JSONFormat: args.LogJSON, Level: logLevel, IncludeLocation: args.LogIncludeLocation, @@ -143,8 +140,8 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { <-ctx.Done() }() - logCh := monitor.Start() - defer monitor.Stop() + logCh := m.Start() + defer m.Stop() initialOffset := int64(0) // receive logs and build frames @@ -164,49 +161,11 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { case <-ctx.Done(): break LOOP } + } }() - - var streamErr error -OUTER: - for { - select { - case frame, ok := <-frames: - if !ok { - // frame may have been closed when an error - // occurred. Check once more for an error. - select { - case streamErr = <-errCh: - // There was a pending error! 
- default: - // No error, continue on - } - - break OUTER - } - - var resp cstructs.StreamErrWrapper - if args.PlainText { - resp.Payload = frame.Data - } else { - if err := frameCodec.Encode(frame); err != nil { - streamErr = err - break OUTER - } - - resp.Payload = buf.Bytes() - buf.Reset() - } - - if err := encoder.Encode(resp); err != nil { - streamErr = err - break OUTER - } - encoder.Reset(conn) - case <-ctx.Done(): - break OUTER - } - } + streamEncoder := monitor.NewStreamEncoder(&buf, conn, encoder, frameCodec, args.PlainText) + streamErr := streamEncoder.EncodeStream(frames, errCh, ctx, framer, false) if streamErr != nil { handleStreamResultError(streamErr, pointer.Of(int64(500)), encoder) @@ -214,7 +173,7 @@ OUTER: } } -// Host collects data about the host evironment running the agent +// Host collects data about the host environment running the agent func (a *Agent) Host(args *structs.HostDataRequest, reply *structs.HostDataResponse) error { aclObj, err := a.c.ResolveToken(args.AuthToken) if err != nil { @@ -233,3 +192,94 @@ func (a *Agent) Host(args *structs.HostDataRequest, reply *structs.HostDataRespo reply.HostData = data return nil } + +func (a *Agent) monitorExport(conn io.ReadWriteCloser) { + defer conn.Close() + + // Decode arguments + var args cstructs.MonitorExportRequest + + decoder := codec.NewDecoder(conn, structs.MsgpackHandle) + encoder := codec.NewEncoder(conn, structs.MsgpackHandle) + + if err := decoder.Decode(&args); err != nil { + handleStreamResultError(err, pointer.Of(int64(500)), encoder) + return + } + + // Check acl + if aclObj, err := a.c.ResolveToken(args.AuthToken); err != nil { + handleStreamResultError(err, pointer.Of(int64(403)), encoder) + return + } else if !aclObj.AllowAgentRead() { + handleStreamResultError(structs.ErrPermissionDenied, pointer.Of(int64(403)), encoder) + return + } + + nomadLogPath := a.c.GetConfig().LogFile + if args.OnDisk && nomadLogPath == "" { + handleStreamResultError(errors.New("No nomad log file 
defined"), pointer.Of(int64(400)), encoder) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + opts := monitor.MonitorExportOpts{ + Logger: a.c.logger, + LogsSince: args.LogsSince, + ServiceName: args.ServiceName, + NomadLogPath: nomadLogPath, + OnDisk: args.OnDisk, + Follow: args.Follow, + Context: ctx, + } + + frames := make(chan *sframer.StreamFrame, streamFramesBuffer) + errCh := make(chan error) + var buf bytes.Buffer + frameSize := 1024 + frameCodec := codec.NewEncoder(&buf, structs.JsonHandle) + + framer := sframer.NewStreamFramer(frames, 1*time.Second, 200*time.Millisecond, frameSize) + framer.Run() + defer framer.Destroy() + + // goroutine to detect remote side closing + go func() { + if _, err := conn.Read(nil); err != nil { + // One end of the pipe explicitly closed, exit + cancel() + return + } + <-ctx.Done() + }() + + m, err := monitor.NewExportMonitor(opts) + if err != nil { + handleStreamResultError(err, pointer.Of(int64(500)), encoder) + return + } + var eofCancelCh chan error + + streamCh := m.Start() + initialOffset := int64(0) + eofCancel := !opts.Follow + + // receive logs and build frames + streamReader := monitor.NewStreamReader(streamCh, framer, int64(frameSize)) + go func() { + defer framer.Destroy() + if err := streamReader.StreamFixed(ctx, initialOffset, "", 0, eofCancelCh, eofCancel); err != nil { + select { + case errCh <- err: + case <-ctx.Done(): + } + } + }() + streamEncoder := monitor.NewStreamEncoder(&buf, conn, encoder, frameCodec, args.PlainText) + streamErr := streamEncoder.EncodeStream(frames, errCh, ctx, framer, true) + + if streamErr != nil { + handleStreamResultError(streamErr, pointer.Of(int64(500)), encoder) + return + } +} diff --git a/client/agent_endpoint_test.go b/client/agent_endpoint_test.go index 7c02691e5..3eb845bd1 100644 --- a/client/agent_endpoint_test.go +++ b/client/agent_endpoint_test.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "net" + "os" "strings" "testing" "time" @@ -18,11 +19,13 
@@ import ( "github.com/hashicorp/nomad/client/config" sframer "github.com/hashicorp/nomad/client/lib/streamframer" cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/command/agent/monitor" "github.com/hashicorp/nomad/command/agent/pprof" "github.com/hashicorp/nomad/nomad" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" + "github.com/shoenig/test/must" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -446,3 +449,81 @@ func TestAgentHost_ACL(t *testing.T) { }) } } + +func TestMonitor_MonitorExport(t *testing.T) { + ci.Parallel(t) + + // Create test file + dir := t.TempDir() + f, err := os.CreateTemp(dir, "log") + must.NoError(t, err) + for range 1000 { + _, _ = f.WriteString(fmt.Sprintf("%v [INFO] it's log, it's log, it's big it's heavy it's wood", time.Now())) + } + f.Close() + testFilePath := f.Name() + testFileContents, err := os.ReadFile(testFilePath) + must.NoError(t, err) + + // start server + s, root, cleanupS := nomad.TestACLServer(t, nil) + defer cleanupS() + testutil.WaitForLeader(t, s.RPC) + defer cleanupS() + + c, cleanupC := TestClient(t, func(c *config.Config) { + c.ACLEnabled = true + c.Servers = []string{s.GetConfig().RPCAddr.String()} + c.LogFile = testFilePath + }) + + tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", mock.NodePolicy(acl.PolicyDeny)) + defer cleanupC() + + testutil.WaitForLeader(t, s.RPC) + + cases := []struct { + name string + expected string + serviceName string + token string + onDisk bool + expectErr bool + }{ + { + name: "happy_path_golden_file", + onDisk: true, + expected: string(testFileContents), + token: root.SecretID, + }, + { + name: "token_error", + onDisk: true, + expected: string(testFileContents), + token: tokenBad.SecretID, + expectErr: true, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + req := cstructs.MonitorExportRequest{ + 
NodeID: "this is checked in the CLI", + OnDisk: tc.onDisk, + QueryOptions: structs.QueryOptions{ + Region: "global", + AuthToken: tc.token, + }, + } + + builder, finalError := monitor.ExportMonitorClient_TestHelper(req, c, time.After(3*time.Second)) + if tc.expectErr { + must.Error(t, finalError) + return + } + must.NoError(t, err) + must.NotNil(t, builder) + must.Eq(t, strings.TrimSpace(tc.expected), strings.TrimSpace(builder.String())) + }) + + } +} diff --git a/client/config/config.go b/client/config/config.go index ebefd532a..12e295903 100644 --- a/client/config/config.go +++ b/client/config/config.go @@ -384,6 +384,9 @@ type Config struct { // NodeMaxAllocs is an optional field that sets the maximum number of // allocations a node can be assigned. Defaults to 0 and ignored if unset. NodeMaxAllocs int + + // LogFile is used by MonitorExport to stream a server's log file + LogFile string `hcl:"log_file"` } type APIListenerRegistrar interface { diff --git a/client/lib/streamframer/framer.go b/client/lib/streamframer/framer.go index 43a63398a..c01733105 100644 --- a/client/lib/streamframer/framer.go +++ b/client/lib/streamframer/framer.go @@ -97,6 +97,9 @@ type StreamFramer struct { // Captures whether the framer is running running bool + + // Confirms final flush sent + flushed bool } // NewStreamFramer creates a new stream framer that will output StreamFrames to @@ -107,7 +110,6 @@ func NewStreamFramer(out chan<- *StreamFrame, // Create the heartbeat and flush ticker heartbeat := time.NewTicker(heartbeatRate) flusher := time.NewTicker(batchWindow) - return &StreamFramer{ out: out, frameSize: frameSize, @@ -123,7 +125,6 @@ func NewStreamFramer(out chan<- *StreamFrame, // Destroy is used to cleanup the StreamFramer and flush any pending frames func (s *StreamFramer) Destroy() { s.l.Lock() - wasShutdown := s.shutdown s.shutdown = true @@ -204,7 +205,6 @@ OUTER: // Send() may have left a partial frame. Send it now. 
if !s.f.IsCleared() { s.f.Data = s.readData() - // Only send if there's actually data left if len(s.f.Data) > 0 { // Cannot select on shutdownCh as it's already closed @@ -281,6 +281,7 @@ func (s *StreamFramer) Send(file, fileEvent string, data []byte, offset int64) e // Flush till we are under the max frame size for s.data.Len() >= s.frameSize || force { + // Clear since are flushing the frame and capturing the file event. // Subsequent data frames will be flushed based on the data size alone // since they share the same fileevent. @@ -309,3 +310,22 @@ func (s *StreamFramer) Send(file, fileEvent string, data []byte, offset int64) e return nil } + +func (s *StreamFramer) IsFlushed() bool { + return s.flushed +} + +func (s *StreamFramer) Flush() bool { + s.l.Lock() + // Send() may have left a partial frame. Send it now. + s.f.Data = s.readData() + + // Only send if there's actually data left + if len(s.f.Data) > 0 { + s.out <- s.f.Copy() + } + s.flushed = true + + s.l.Unlock() + return s.IsFlushed() +} diff --git a/client/structs/structs.go b/client/structs/structs.go index 4b2fd8fe1..95e7bff9c 100644 --- a/client/structs/structs.go +++ b/client/structs/structs.go @@ -62,6 +62,37 @@ type MonitorRequest struct { structs.QueryOptions } +type MonitorExportRequest struct { + // NodeID is the node we want to track the logs of + NodeID string + + // ServerID is the server we want to track the logs of + ServerID string + + // ServiceName is the systemd service for which we want to retrieve logs + // Cannot be used with OnDisk + ServiceName string + + // Follow indicates that the monitor should continue to deliver logs until + // an outside interrupt. 
Cannot be used with OnDisk + Follow bool + + // LogsSince sets the lookback time for monitorExport logs in hours + LogsSince string + + // OnDisk indicates that nomad should export logs written to the configured nomad log path + OnDisk bool + + // NomadLogPath is set to the nomad log path by the HTTP agent if OnDisk + // is true + NomadLogPath string + + // PlainText disables base64 encoding. + PlainText bool + + structs.QueryOptions +} + // AllocFileInfo holds information about a file inside the AllocDir type AllocFileInfo struct { Name string diff --git a/command/agent/agent.go b/command/agent/agent.go index 5760b3747..54e467fa3 100644 --- a/command/agent/agent.go +++ b/command/agent/agent.go @@ -655,7 +655,8 @@ func convertServerConfig(agentConfig *Config) (*nomad.Config, error) { return nil, fmt.Errorf("number of schedulers should be between 0 and %d", runtime.NumCPU()) } - + // Copy LogFile config value + conf.LogFile = agentConfig.LogFile return conf, nil } @@ -753,7 +754,6 @@ func convertClientConfig(agentConfig *Config) (*clientconfig.Config, error) { if conf == nil { conf = clientconfig.DefaultConfig() } - conf.Servers = agentConfig.Client.Servers conf.DevMode = agentConfig.DevMode conf.EnableDebug = agentConfig.EnableDebug @@ -1016,6 +1016,7 @@ func convertClientConfig(agentConfig *Config) (*clientconfig.Config, error) { conf.Users = clientconfig.UsersConfigFromAgent(agentConfig.Client.Users) + conf.LogFile = agentConfig.LogFile return conf, nil } diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index 8c50f70a0..0c987dbdb 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -22,6 +22,7 @@ import ( "github.com/hashicorp/nomad/api" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/command/agent/host" + "github.com/hashicorp/nomad/command/agent/monitor" "github.com/hashicorp/nomad/command/agent/pprof" "github.com/hashicorp/nomad/nomad" 
"github.com/hashicorp/nomad/nomad/structs" @@ -211,6 +212,103 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( } s.parse(resp, req, &args.QueryOptions.Region, &args.QueryOptions) + codedErr := s.streamMonitor(resp, req, args, nodeID, "Agent.Monitor") + + return nil, codedErr +} + +func (s *HTTPServer) AgentMonitorExport(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Process and validate arguments + onDisk := false + onDiskBool, err := parseBool(req, "on_disk") + if err != nil { + return nil, CodedError(400, fmt.Sprintf("Unknown value for on-disk: %v", err)) + } + if onDiskBool != nil { + onDisk = *onDiskBool + } + + follow := false + followBool, err := parseBool(req, "follow") + if err != nil { + return nil, CodedError(400, fmt.Sprintf("Unknown value for follow: %v", err)) + } + if followBool != nil { + follow = *followBool + } + + plainText := false + plainTextBool, err := parseBool(req, "plain") + if err != nil { + return nil, CodedError(400, fmt.Sprintf("Unknown value for plain: %v", err)) + } + if plainTextBool != nil { + plainText = *plainTextBool + } + + logsSince := "72h" //default value + logsSinceStr := req.URL.Query().Get("logs_since") + if logsSinceStr != "" { + _, err := time.ParseDuration(logsSinceStr) + if err != nil { + return nil, CodedError(400, fmt.Sprintf("Unknown value for logs-since: %v", err)) + } + logsSince = logsSinceStr + } + + serviceName := req.URL.Query().Get("service_name") + + nodeID := req.URL.Query().Get("node_id") + serverID := req.URL.Query().Get("server_id") + + if nodeID != "" && serverID != "" { + return nil, CodedError(400, "Cannot target node and server simultaneously") + } + + if onDisk && serviceName != "" { + return nil, CodedError(400, "Cannot target journald and nomad log file simultaneously") + } + + if !onDisk && serviceName == "" { + return nil, CodedError(400, "Either -service-name or -on-disk must be set") + } + if onDisk && follow { + return nil, 
CodedError(400, "Cannot follow log file") + } + + if serviceName != "" { + if err := monitor.ScanServiceName(serviceName); err != nil { + return nil, CodedError(422, err.Error()) + } + } + + // Build the request and parse the ACL token + args := cstructs.MonitorExportRequest{ + NodeID: nodeID, + ServerID: serverID, + LogsSince: logsSince, + ServiceName: serviceName, + OnDisk: onDisk, + Follow: follow, + PlainText: plainText, + } + + // Force the Content-Type to avoid Go's http.ResponseWriter from + // detecting an incorrect or unsafe one. + if plainText { + resp.Header().Set("Content-Type", "text/plain") + } else { + resp.Header().Set("Content-Type", "application/json") + } + + s.parse(resp, req, &args.QueryOptions.Region, &args.QueryOptions) + codedErr := s.streamMonitor(resp, req, args, nodeID, "Agent.MonitorExport") + + return nil, codedErr +} + +func (s *HTTPServer) streamMonitor(resp http.ResponseWriter, req *http.Request, + args any, nodeID string, endpoint string) error { // Make the RPC var handler structs.StreamingRpcHandler @@ -219,24 +317,25 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( // Determine the handler to use useLocalClient, useClientRPC, useServerRPC := s.rpcHandlerForNode(nodeID) if useLocalClient { - handler, handlerErr = s.agent.Client().StreamingRpcHandler("Agent.Monitor") + handler, handlerErr = s.agent.Client().StreamingRpcHandler(endpoint) } else if useClientRPC { - handler, handlerErr = s.agent.Client().RemoteStreamingRpcHandler("Agent.Monitor") + handler, handlerErr = s.agent.Client().RemoteStreamingRpcHandler(endpoint) } else if useServerRPC { - handler, handlerErr = s.agent.Server().StreamingRpcHandler("Agent.Monitor") + handler, handlerErr = s.agent.Server().StreamingRpcHandler(endpoint) } else { - handlerErr = CodedError(400, "No local Node and node_id not provided") + handlerErr = CodedError(400, "No local Node") } // No node id monitor current server/client } else if srv := s.agent.Server(); 
srv != nil { - handler, handlerErr = srv.StreamingRpcHandler("Agent.Monitor") + handler, handlerErr = srv.StreamingRpcHandler(endpoint) } else { - handler, handlerErr = s.agent.Client().StreamingRpcHandler("Agent.Monitor") + handler, handlerErr = s.agent.Client().StreamingRpcHandler(endpoint) } if handlerErr != nil { - return nil, CodedError(500, handlerErr.Error()) + return CodedError(500, handlerErr.Error()) } + httpPipe, handlerPipe := net.Pipe() decoder := codec.NewDecoder(httpPipe, structs.MsgpackHandle) encoder := codec.NewEncoder(httpPipe, structs.MsgpackHandle) @@ -256,7 +355,6 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( // stream response go func() { defer cancel() - // Send the request if err := encoder.Encode(args); err != nil { errCh <- CodedError(500, err.Error()) @@ -293,7 +391,8 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( }() handler(handlerPipe) - cancel() + cancel() //this seems like it should be wrong to me but removing it didn't + // affect either truncation or short returns codedErr := <-errCh if codedErr != nil && @@ -302,7 +401,7 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( strings.Contains(codedErr.Error(), "EOF")) { codedErr = nil } - return nil, codedErr + return codedErr } func (s *HTTPServer) AgentForceLeaveRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index 7033814b9..49e2d06ca 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -27,6 +27,7 @@ import ( "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" + sframer "github.com/hashicorp/nomad/client/lib/streamframer" "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/pool" "github.com/hashicorp/nomad/nomad/mock" @@ -446,6 +447,204 @@ func 
TestHTTP_AgentMonitor(t *testing.T) { }) } +func TestHTTP_AgentMonitorExport(t *testing.T) { + ci.Parallel(t) + const expectedText = "log log log log log" + dir := t.TempDir() + testFile, err := os.CreateTemp(dir, "nomadtests") + must.NoError(t, err) + + _, err = testFile.Write([]byte(expectedText)) + must.NoError(t, err) + inlineFilePath := testFile.Name() + + config := func(c *Config) { + c.LogFile = inlineFilePath + } + + baseURL := "/v1/agent/monitor/export?" + cases := []struct { + name string + follow string + logsSince string + nodeID string + onDisk string + serviceName string + serverID string + + config func(c *Config) + errCode int + errString string + expectErr bool + want string + }{ + { + name: "happy_path", + follow: "false", + onDisk: "true", + logsSince: "9s", + + config: config, + expectErr: false, + want: expectedText, + }, + { + name: "invalid_onDisk", + follow: "false", + onDisk: "green", + + config: config, + errCode: 400, + expectErr: true, + errString: "Unknown value for on-disk", + }, + { + name: "invalid_follow", + follow: "green", + onDisk: "false", + + config: config, + errCode: 400, + expectErr: true, + errString: "Unknown value for follow", + }, + { + name: "invalid_service_name", + follow: "true", + onDisk: "false", + serviceName: "nomad%", + + config: config, + errCode: 422, + expectErr: true, + errString: "does not meet systemd conventions", + }, + { + name: "invalid_logsSince_duration", + follow: "false", + onDisk: "true", + serviceName: "nomad", + logsSince: "98seconds", + + config: config, + errCode: 400, + expectErr: true, + errString: `unknown unit "seconds" in duration`, + want: expectedText, + }, + { + name: "server_and_node", + follow: "false", + onDisk: "true", + nodeID: "doesn'tneedtobeuuid", + serverID: "doesntneedtobeuuid", + + config: config, + errCode: 400, + errString: "Cannot target node and server simultaneously", + expectErr: true, + want: expectedText, + }, + { + name: "onDisk_and_serviceName", + follow: "false", 
+ onDisk: "true", + serviceName: "nomad", + nodeID: "doesn'tneedtobeuuid", + + config: config, + errCode: 400, + errString: "Cannot target journald and nomad log file simultaneously", + expectErr: true, + want: expectedText, + }, + { + name: "neither_onDisk_nor_serviceName", + follow: "false", + nodeID: "doesn'tneedtobeuuid", + + config: config, + errCode: 400, + errString: "Either -service-name or -on-disk must be set", + expectErr: true, + want: expectedText, + }, + { + name: "onDisk_and_follow", + follow: "true", + onDisk: "true", + nodeID: "doesn'tneedtobeuuid", + + config: config, + errCode: 400, + errString: "Cannot follow log file", + expectErr: true, + want: expectedText, + }, + { + name: "onDisk_and_no_log_file", + onDisk: "true", + + config: nil, + errCode: 400, + errString: "No nomad log file defined", + expectErr: true, + want: expectedText, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + httpTest(t, tc.config, func(s *TestAgent) { + // Prepare urlstring + urlVal := url.Values{} + urlParamPrep := func(k string, v string, failCase string, values *url.Values) { + if v != failCase { + values.Add(k, v) + } + } + + urlParamPrep("follow", tc.follow, "false", &urlVal) + urlParamPrep("logs_since", tc.logsSince, "", &urlVal) + urlParamPrep("on_disk", tc.onDisk, "", &urlVal) + urlParamPrep("node_id", tc.nodeID, "", &urlVal) + urlParamPrep("server_id", tc.serverID, "", &urlVal) + urlParamPrep("service_name", tc.serviceName, "", &urlVal) + urlString := baseURL + urlVal.Encode() + + req, err := http.NewRequest(http.MethodGet, urlString, nil) + must.NoError(t, err) + + resp := newClosableRecorder() + defer resp.Close() + var ( + builder strings.Builder + frame sframer.StreamFrame + ) + + _, err = s.Server.AgentMonitorExport(resp, req) + if tc.expectErr { + t.Log(err.Error()) + must.Eq(t, tc.errCode, err.(HTTPCodedError).Code()) + must.StrContains(t, err.Error(), tc.errString) + return + } + + must.NoError(t, err) + output, err := 
io.ReadAll(resp.Body) + must.NoError(t, err) + + err = json.Unmarshal(output, &frame) + if err != nil && err != io.EOF { + must.NoError(t, err) + } + + builder.WriteString(string(frame.Data)) + must.Eq(t, tc.want, builder.String()) + }) + }) + } +} + // Scenarios when Pprof requests should be available // see https://github.com/hashicorp/nomad/issues/6496 // +---------------+------------------+--------+------------------+ diff --git a/command/agent/config.go b/command/agent/config.go index e6311a6af..4cce6d309 100644 --- a/command/agent/config.go +++ b/command/agent/config.go @@ -424,6 +424,9 @@ type ClientConfig struct { // NodeMaxAllocs sets the maximum number of allocations per node // Defaults to 0 and ignored if unset. NodeMaxAllocs int `hcl:"node_max_allocs"` + + // LogFile is used by MonitorExport to stream a client's log file + LogFile string `hcl:"log_file"` } func (c *ClientConfig) Copy() *ClientConfig { @@ -756,6 +759,9 @@ type ServerConfig struct { // expected to complete before the server is considered healthy. Without // this, the server can hang indefinitely waiting for these. StartTimeout string `hcl:"start_timeout"` + + // LogFile is used by MonitorExport to stream a server's log file + LogFile string `hcl:"log_file"` } func (s *ServerConfig) Copy() *ServerConfig { diff --git a/command/agent/http.go b/command/agent/http.go index 52c552677..2cabf64ff 100644 --- a/command/agent/http.go +++ b/command/agent/http.go @@ -472,6 +472,7 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) { // "application/json" Content-Type depending on the ?plain= query // parameter. 
s.mux.HandleFunc("/v1/agent/monitor", s.wrap(s.AgentMonitor)) + s.mux.HandleFunc("/v1/agent/monitor/export", s.wrap(s.AgentMonitorExport)) s.mux.HandleFunc("/v1/agent/pprof/", s.wrapNonJSON(s.AgentPprofRequest)) diff --git a/command/agent/monitor/export_monitor.go b/command/agent/monitor/export_monitor.go new file mode 100644 index 000000000..b86485ec4 --- /dev/null +++ b/command/agent/monitor/export_monitor.go @@ -0,0 +1,275 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package monitor + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "runtime" + "slices" + "strings" + "sync" + "time" + + "github.com/hashicorp/go-hclog" +) + +const defaultBufSize = 512 + +// ExportMonitor implements the Monitor interface for testing +type ExportMonitor struct { + sync.Mutex + + logCh chan []byte + logger hclog.Logger + + // doneCh coordinates breaking out of the export loop + doneCh chan struct{} + + // ExportReader can read from the cli or the NomadFilePath + ExportReader *ExportReader + + bufSize int +} + +type MonitorExportOpts struct { + Logger hclog.Logger + + // LogsSince sets the lookback time for monitorExport logs in hours + LogsSince string + + // OnDisk indicates that nomad should export logs written to the configured nomad log path + OnDisk bool + + // ServiceName is the systemd service for which we want to retrieve logs + // Cannot be used with OnDisk + ServiceName string + + // NomadLogPath is set to the nomad log path by the HTTP agent if OnDisk + // is true + NomadLogPath string + + // Follow indicates that the monitor should continue to deliver logs until + // an outside interrupt + Follow bool + + // Context passed from client to close the cmd and exit the function + Context context.Context + + // ExportMonitor's buffer size, defaults to 512 if unset by caller + BufSize int +} + +type ExportReader struct { + io.Reader + Cmd *exec.Cmd + UseCli bool + Follow bool +} + +// NewExportMonitor validates and 
prepares the appropriate reader before +// returning a new ExportMonitor or the appropriate error +func NewExportMonitor(opts MonitorExportOpts) (*ExportMonitor, error) { + var ( + exportReader *ExportReader + bufSize int + ) + + if runtime.GOOS != "linux" && + opts.ServiceName != "" { + return nil, errors.New("journald log monitoring only available on linux") + } + + if opts.BufSize == 0 { + bufSize = defaultBufSize + } else { + bufSize = opts.BufSize + } + + if opts.OnDisk && opts.ServiceName == "" { + e, prepErr := fileReader(opts) + if prepErr != nil { + return nil, prepErr + } + exportReader = e + } + + if opts.ServiceName != "" && !opts.OnDisk { + e, prepErr := cliReader(opts) + if prepErr != nil { + return nil, prepErr + } + exportReader = e + } + + sw := ExportMonitor{ + logger: hclog.Default().Named("export"), + doneCh: make(chan struct{}, 1), + logCh: make(chan []byte, bufSize), + bufSize: bufSize, + ExportReader: exportReader, + } + + return &sw, nil +} + +// ScanServiceName checks that the length, prefix and suffix conform to +// systemd conventions and ensures the service name includes the word 'nomad' +func ScanServiceName(input string) error { + prefix := "" + // invalid if prefix and suffix together are > 255 char + if len(input) > 255 { + return errors.New("service name too long") + } + + if isNomad := strings.Contains(input, "nomad"); !isNomad { + return errors.New(`service name must include 'nomad`) + } + + // if there is a suffix, check against list of valid suffixes + // and set prefix to exclude suffix index, else set prefix + splitInput := strings.Split(input, ".") + if len(splitInput) < 2 { + prefix = input + } else { + suffix := splitInput[len(splitInput)-1] + validSuffix := []string{ + "service", + "socket", + "device", + "mount", + "automount", + "swap", + "target", + "path", + "timer", + "slice", + "scope", + } + if valid := slices.Contains(validSuffix, suffix); !valid { + return errors.New("invalid suffix") + } + prefix = 
strings.Join(splitInput[:len(splitInput)-1], "") + } + + safe, _ := regexp.MatchString(`^[\w\\._-]*(@[\w\\._-]+)?$`, prefix) + if !safe { + return fmt.Errorf("%s does not meet systemd conventions", prefix) + } + return nil +} + +func cliReader(opts MonitorExportOpts) (*ExportReader, error) { + isCli := true + // Vet servicename again + if err := ScanServiceName(opts.ServiceName); err != nil { + return nil, err + } + cmdDuration := "72 hours" + if opts.LogsSince != "" { + parsedDur, err := time.ParseDuration(opts.LogsSince) + if err != nil { + return nil, err + } + cmdDuration = parsedDur.String() + } + // build command with vetted inputs + cmdArgs := []string{"-xu", opts.ServiceName, "--since", fmt.Sprintf("%s ago", cmdDuration)} + + if opts.Follow { + cmdArgs = append(cmdArgs, "-f") + } + cmd := exec.CommandContext(opts.Context, "journalctl", cmdArgs...) + + // set up reader + stdOut, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + stdErr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + multiReader := io.MultiReader(stdOut, stdErr) + cmd.Start() + + return &ExportReader{multiReader, cmd, isCli, opts.Follow}, nil +} + +func fileReader(opts MonitorExportOpts) (*ExportReader, error) { + notCli := false + file, err := os.Open(opts.NomadLogPath) + if err != nil { + return nil, err + } + return &ExportReader{file, nil, notCli, opts.Follow}, nil + +} + +// Stop stops the monitoring process +func (d *ExportMonitor) Stop() { + select { + case _, ok := <-d.doneCh: + if !ok { + if d.ExportReader.UseCli { + d.ExportReader.Cmd.Wait() + } + close(d.logCh) + return + } + default: + } + close(d.logCh) +} + +// Start reads data from the monitor's ExportReader into its logCh +func (d *ExportMonitor) Start() <-chan []byte { + // Read, copy, and send to channel until we hit EOF or error + go func() { + defer d.Stop() + logChunk := make([]byte, d.bufSize) + + for { + n, readErr := d.ExportReader.Read(logChunk) + if readErr != nil && readErr != 
io.EOF { + d.logger.Error("unable to read logs into channel", readErr.Error()) + return + } + + d.Write(logChunk[:n]) + + if readErr == io.EOF { + break + } + } + close(d.doneCh) + }() + return d.logCh +} + +// Write attempts to send latest log to logCh +// it drops the log if channel is unavailable to receive +func (d *ExportMonitor) Write(p []byte) (n int) { + d.Lock() + defer d.Unlock() + + // ensure logCh is still open + select { + case <-d.doneCh: + return + default: + } + + bytes := make([]byte, len(p)) + copy(bytes, p) + + d.logCh <- bytes + + return len(p) +} diff --git a/command/agent/monitor/monitor_test.go b/command/agent/monitor/monitor_test.go index 306ae8859..32a108411 100644 --- a/command/agent/monitor/monitor_test.go +++ b/command/agent/monitor/monitor_test.go @@ -4,13 +4,17 @@ package monitor import ( + "context" "fmt" + "os" "strings" + "testing" "time" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/nomad/ci" + "github.com/shoenig/test/must" "github.com/stretchr/testify/require" ) @@ -27,7 +31,6 @@ func TestMonitor_Start(t *testing.T) { logCh := m.Start() defer m.Stop() - go func() { logger.Debug("test log") time.Sleep(10 * time.Millisecond) @@ -90,3 +93,103 @@ TEST: } } } + +func TestMonitor_Export(t *testing.T) { + ci.Parallel(t) + const ( + expectedText = "log log log log log" + ) + + dir := t.TempDir() + f, err := os.CreateTemp(dir, "log") + must.NoError(t, err) + for range 1000 { + _, _ = f.WriteString(fmt.Sprintf("%v [INFO] it's log, it's log, it's big it's heavy it's wood", time.Now())) + } + f.Close() + goldenFilePath := f.Name() + goldenFileContents, err := os.ReadFile(goldenFilePath) + must.NoError(t, err) + + testFile, err := os.CreateTemp("", "nomadtest") + must.NoError(t, err) + + _, err = testFile.Write([]byte(expectedText)) + must.NoError(t, err) + inlineFilePath := testFile.Name() + + logger := log.NewInterceptLogger(&log.LoggerOptions{ + Level: log.Error, + }) + ctx, cancel := context.WithCancel(context.Background()) + 
cases := []struct { + name string + opts MonitorExportOpts + expected string + expectClose bool + }{ + { + name: "happy_path_logpath_long_file", + opts: MonitorExportOpts{ + Context: ctx, + Logger: logger, + OnDisk: true, + NomadLogPath: goldenFilePath, + }, + expected: string(goldenFileContents), + }, + { + name: "happy_path_logpath_short_file", + opts: MonitorExportOpts{ + Context: ctx, + Logger: logger, + OnDisk: true, + NomadLogPath: inlineFilePath, + }, + expected: expectedText, + }, + { + name: "close client context", + opts: MonitorExportOpts{ + Context: ctx, + Logger: logger, + OnDisk: true, + NomadLogPath: inlineFilePath, + }, + expected: expectedText, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + + monitor, err := NewExportMonitor(tc.opts) + must.NoError(t, err) + logCh := monitor.Start() + if tc.expectClose { + cancel() + } + + var builder strings.Builder + + TEST: + for { + select { + case log, ok := <-logCh: + if !ok { + break TEST + } + builder.Write(log) + default: + continue + } + + } + + if !tc.expectClose { + must.Eq(t, strings.TrimSpace(tc.expected), strings.TrimSpace(builder.String())) + } else { + must.Eq(t, builder.String(), "") + } + }) + } +} diff --git a/command/agent/monitor/stream_helpers.go b/command/agent/monitor/stream_helpers.go new file mode 100644 index 000000000..b82045f01 --- /dev/null +++ b/command/agent/monitor/stream_helpers.go @@ -0,0 +1,250 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package monitor + +import ( + "bytes" + "context" + "io" + "strings" + "sync" + "syscall" + + "github.com/hashicorp/go-msgpack/v2/codec" + sframer "github.com/hashicorp/nomad/client/lib/streamframer" + cstructs "github.com/hashicorp/nomad/client/structs" +) + +// StreamReader is used to process fixed length streams for consumers +// that rely on terminating the stream after hitting an EOF. 
The lock +// protects the buffer during reads +type StreamReader struct { + sync.Mutex + framer *sframer.StreamFramer + ch <-chan []byte + buf []byte + frameSize int64 +} + +// NewStreamReader takes a <-chan[]byte and *sframer.StreamFramer and returns +// a ready to use StreamReader that will allocate its buffer on first read +func NewStreamReader(ch <-chan []byte, framer *sframer.StreamFramer, frameSize int64) *StreamReader { + return &StreamReader{ + ch: ch, + framer: framer, + frameSize: frameSize, + } +} + +// Read reads stream data into the StreamReader's buffer and copies that +// data into p +func (r *StreamReader) Read(p []byte) (n int, err error) { + select { + case data, ok := <-r.ch: + if !ok && len(data) == 0 { + return 0, io.EOF + } + r.Lock() + r.buf = data + default: + return 0, nil + } + + n = copy(p, r.buf) + r.buf = r.buf[n:] + r.Unlock() + return n, nil +} + +// StreamFixed streams any fixed length data stream. If limit is greater than +// zero, the stream will end once that many bytes have been read. If eofCancelCh +// is triggered while at EOF, read one more frame and cancel the stream on the +// next EOF. If the connection is broken an EPIPE error is returned. 
+func (r *StreamReader) StreamFixed(ctx context.Context, offset int64, path string, limit int64, + eofCancelCh chan error, cancelAfterFirstEof bool) error { + defer r.framer.Flush() + parseFramerErr := func(err error) error { + if err == nil { + return nil + } + errMsg := err.Error() + + if strings.Contains(errMsg, io.ErrClosedPipe.Error()) { + // The pipe check is for tests + return syscall.EPIPE + } + + // The connection was closed by our peer + if strings.Contains(errMsg, syscall.EPIPE.Error()) || strings.Contains(errMsg, syscall.ECONNRESET.Error()) { + return syscall.EPIPE + } + + if strings.Contains(errMsg, "forcibly closed") { + return syscall.EPIPE + } + + return err + } + // streamFrameSize is the maximum number of bytes to send in a single frame + streamFrameSize := r.frameSize + + bufSize := streamFrameSize + if limit > 0 && limit < streamFrameSize { + bufSize = limit + } + streamBuffer := make([]byte, bufSize) + + var lastEvent string + + // Only watch file when there is a need for it + cancelReceived := cancelAfterFirstEof + +OUTER: + for { + // Read up to the max frame size + n, readErr := r.Read(streamBuffer) + + // Update the offset + offset += int64(n) + + // Return non-EOF errors + if readErr != nil && readErr != io.EOF { + return readErr + } + + // Send the frame + if n != 0 || lastEvent != "" { + if err := r.framer.Send(path, lastEvent, streamBuffer[:n], offset); err != nil { + return parseFramerErr(err) + } + } + + // Clear the last event + if lastEvent != "" { + lastEvent = "" + } + + // Just keep reading since we aren't at the end of the file so we can + // avoid setting up a file event watcher. 
+ if readErr == nil { + continue + } + // At this point we can stop without waiting for more changes, + // because we have EOF and either we're not following at all, + // or we received an event from the eofCancelCh channel + // and last read was executed + if cancelReceived { + return nil + } + + for { + select { + case <-r.framer.ExitCh(): + return nil + case <-ctx.Done(): + return nil + case _, ok := <-eofCancelCh: + if !ok { + return nil + } + cancelReceived = true + continue OUTER + } + } + } +} + +// Destroy wraps the underlying framer's Destroy() call +func (r *StreamReader) Destroy() { + r.framer.Destroy() +} + +// Run wraps the underlying framer's Run() call +func (r *StreamReader) Run() { + r.framer.Run() +} + +// StreamEncoder consolidates logic used by monitor RPC handlers to encode and +// return stream data +type StreamEncoder struct { + buf *bytes.Buffer + conn io.ReadWriteCloser + encoder *codec.Encoder + frameCodec *codec.Encoder + plainText bool +} + +// NewStreamEncoder takes buf *bytes.Buffer, conn io.ReadWriteCloser, encoder *codec.Encoder +// frameCodec *codec.Encoder,and plainText bool and returns a NewStreamEncoder +func NewStreamEncoder(buf *bytes.Buffer, conn io.ReadWriteCloser, encoder *codec.Encoder, + frameCodec *codec.Encoder, plainText bool) StreamEncoder { + return StreamEncoder{ + buf: buf, + conn: conn, + encoder: encoder, + frameCodec: frameCodec, + plainText: plainText, + } +} + +// EncodeStream reads and encodes data from a chan *sframer.Streamframe until the +// channel is closed. 
If eofCancel is true, EncodeStream continues to read from the closed
+// channel until the underlying framer reports it has flushed its final frame
+func (s *StreamEncoder) EncodeStream(frames chan *sframer.StreamFrame,
+	errCh chan error, ctx context.Context, framer *sframer.StreamFramer,
+	eofCancel bool) (err error) {
+	var streamErr error
+	localFlush := false
+OUTER:
+	for {
+		select {
+		case frame, ok := <-frames:
+			if !ok {
+				// frame may have been closed when an error
+				// occurred. Check once more for an error.
+				select {
+				case streamErr = <-errCh:
+					return streamErr
+					// There was a pending error!
+				default:
+					// No error, continue on and let exitCh control breaking
+				}
+				// Confirm framer.Flush and localFlush if we're expecting EOF
+				if eofCancel {
+					_, ok := <-framer.ExitCh()
+					if !ok {
+						if framer.IsFlushed() && !localFlush {
+							localFlush = true
+							continue
+						} else if framer.IsFlushed() && localFlush {
+							break OUTER
+						}
+					}
+				} else {
+					break OUTER
+				}
+			}
+
+			var resp cstructs.StreamErrWrapper
+			if s.plainText {
+				resp.Payload = frame.Data
+			} else {
+				if err := s.frameCodec.Encode(frame); err != nil && err != io.EOF {
+					return err
+				}
+
+				resp.Payload = s.buf.Bytes()
+				s.buf.Reset()
+			}
+			if err := s.encoder.Encode(resp); err != nil {
+				return err
+			}
+			s.encoder.Reset(s.conn)
+		case <-ctx.Done():
+			break OUTER
+		}
+
+	}
+	return nil
+}
diff --git a/command/agent/monitor/stream_helpers_test.go b/command/agent/monitor/stream_helpers_test.go
new file mode 100644
index 000000000..52bb38c9c
--- /dev/null
+++ b/command/agent/monitor/stream_helpers_test.go
@@ -0,0 +1,250 @@
+// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package monitor + +import ( + "context" + "io" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/nomad/ci" + sframer "github.com/hashicorp/nomad/client/lib/streamframer" + "github.com/shoenig/test/must" +) + +var writeLine = []byte("[INFO] log log log made of wood you are heavy but so good\n") + +func prepFile(t *testing.T) *os.File { + const loopCount = 10 + // Create test file to read from + dir := t.TempDir() + f, err := os.CreateTemp(dir, "log") + must.NoError(t, err) + + for range loopCount { + _, _ = f.Write(writeLine) + } + f.Close() + + // Create test file reader for stream set up + goldenFilePath := f.Name() + fileReader, err := os.Open(goldenFilePath) + must.NoError(t, err) + return fileReader +} + +func TestClientStreamReader_StreamFixed(t *testing.T) { + ci.Parallel(t) + + streamBytes := func(streamCh chan []byte, wg *sync.WaitGroup, file *os.File) { + go func() { + defer close(streamCh) + defer wg.Done() + logChunk := make([]byte, len(writeLine)) + for { + n, readErr := file.Read(logChunk) + if readErr != nil && readErr != io.EOF { + must.NoError(t, readErr) + } + + streamCh <- logChunk[:n] + if readErr == io.EOF { + break + } + } + }() + } + + cases := []struct { + name string + + eofCancel bool + expectErr bool + errString string + }{ + { + name: "happy_path", + eofCancel: true, + }, + { + name: "Stream Framer not Running", + expectErr: true, + eofCancel: true, + errString: "StreamFramer not running", + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + file := prepFile(t) + goldenFileContents, err := os.ReadFile(file.Name()) + must.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(1) + streamMsg := make(chan []byte, len(goldenFileContents)) + streamBytes(streamMsg, &wg, file) + wg.Wait() + + frames := make(chan *sframer.StreamFrame, 32) + frameSize := 1024 + errCh := make(chan error, 1) + framer := sframer.NewStreamFramer(frames, 1*time.Second, 
200*time.Millisecond, frameSize) + streamReader := NewStreamReader(streamMsg, framer, int64(frameSize)) + ctx, cancel := context.WithCancel(context.Background()) + + defer cancel() + wg.Add(1) //block until streamReader completes + + go func() { + defer wg.Done() + defer streamReader.Destroy() + if !tc.expectErr { + streamReader.Run() + } + initialOffset := int64(0) + err := streamReader.StreamFixed(ctx, initialOffset, "", 0, errCh, tc.eofCancel) + if !tc.expectErr { + must.NoError(t, err) + } else { + must.NotNil(t, err) + must.EqError(t, err, tc.errString) + } + + }() + wg.Wait() + // Parse and validate the contents of the frames channel + var streamErr error + var builder strings.Builder + var skipCount int + + OUTER: + for skipCount < 2 { + select { + case frame, ok := <-frames: + if !ok { + select { + case streamErr = <-errCh: + must.NoError(t, streamErr) //we shouldn't hit an error here + default: + + } + break OUTER + } + builder.Write(frame.Data) + case streamErr = <-errCh: + must.NoError(t, streamErr) //we shouldn't hit an error here + case <-ctx.Done(): + break OUTER + default: + skipCount++ + time.Sleep(1 * time.Millisecond) //makes the test a touch less flakey + } + } + if !tc.expectErr { + must.Eq(t, string(goldenFileContents), builder.String()) + } + + }) + + } +} + +func TestScanServiceName(t *testing.T) { + cases := []struct { + testString string + expectErr bool + }{ + { + testString: `nomad`, + }, + { + testString: `nomad.socket`, + }, + { + testString: `nomad-client.service`, + }, + { + testString: `nomad.client.02.swap`, + }, + { + testString: `nomadhelper@54.device`, + }, + { + testString: `1.\@_-nomad@`, + expectErr: true, + }, + { + testString: `1./@_-nomad@.automount`, + expectErr: true, + }, + { + testString: `docker.path`, + expectErr: true, + }, + { + testString: `nomad.path.gotcha`, + expectErr: true, + }, + { + testString: `nomad/8.path`, + expectErr: true, + }, + { + testString: `nomad%.path`, + expectErr: true, + }, + { + testString: 
`nom4ad.path`, + expectErr: true, + }, + { + testString: `nomad,.path`, + expectErr: true, + }, + { + testString: `nomad.client`, + expectErr: true, + }, + { + testString: `nomad!.path`, + expectErr: true, + }, + { + testString: `nomad%http.timer`, + expectErr: true, + }, + { + testString: `nomad,http.mount`, + expectErr: true, + }, + { + testString: `nomad$http.service`, + expectErr: true, + }, + { + testString: `nomad$.http.service`, + expectErr: true, + }, + { + testString: `nomad$`, + expectErr: true, + }, + } + + for _, tc := range cases { + t.Run(tc.testString, func(t *testing.T) { + err := ScanServiceName(tc.testString) + if !tc.expectErr { + must.NoError(t, err) + } else { + must.Error(t, err) + } + + }) + } +} diff --git a/command/agent/monitor/test_helpers.go b/command/agent/monitor/test_helpers.go new file mode 100644 index 000000000..ab5b80ddd --- /dev/null +++ b/command/agent/monitor/test_helpers.go @@ -0,0 +1,99 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package monitor + +import ( + "encoding/json" + "errors" + "io" + "net" + "strings" + "time" + + "github.com/hashicorp/go-msgpack/v2/codec" + sframer "github.com/hashicorp/nomad/client/lib/streamframer" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/nomad/structs" +) + +// StreamingClient is an interface that implements the StreamingRpcHandler function +type StreamingClient interface { + StreamingRpcHandler(string) (structs.StreamingRpcHandler, error) +} + +// ExportMonitorClient_TestHelper consolidates streaming test setup for use in +// client and server RPChandler tests +func ExportMonitorClient_TestHelper(req cstructs.MonitorExportRequest, c StreamingClient, + userTimeout <-chan time.Time) (*strings.Builder, error) { + var ( + builder strings.Builder + returnedErr error + timeout <-chan time.Time + ) + handler, err := c.StreamingRpcHandler("Agent.MonitorExport") + if err != nil { + return nil, err + } + + // create pipe + p1, 
p2 := net.Pipe() + defer p1.Close() + defer p2.Close() + + errCh := make(chan error) + streamMsg := make(chan *cstructs.StreamErrWrapper) + + go handler(p2) + + // Start decoder + go func() { + decoder := codec.NewDecoder(p1, structs.MsgpackHandle) + for { + var msg cstructs.StreamErrWrapper + err := decoder.Decode(&msg) + streamMsg <- &msg + if err != nil { + errCh <- err + return + } + + } + }() + + // send request + encoder := codec.NewEncoder(p1, structs.MsgpackHandle) + if err := encoder.Encode(req); err != nil { + return nil, err + } + if userTimeout != nil { + timeout = userTimeout + } + +OUTER: + for { + select { + case <-timeout: + return nil, errors.New("expected to be unreachable") + case err := <-errCh: + if err != nil && err != io.EOF { + return nil, err + } + case message := <-streamMsg: + var frame sframer.StreamFrame + + if message.Error != nil { + returnedErr = message.Error + } + + if len(message.Payload) != 0 { + err = json.Unmarshal(message.Payload, &frame) + returnedErr = err + builder.Write(frame.Data) + } else { + break OUTER + } + } + } + return &builder, returnedErr +} diff --git a/command/agent_monitor.go b/command/agent_monitor.go index 2b177e237..953c16eec 100644 --- a/command/agent_monitor.go +++ b/command/agent_monitor.go @@ -7,11 +7,8 @@ import ( "fmt" "io" "os" - "os/signal" "strconv" "strings" - "syscall" - "time" "github.com/hashicorp/cli" "github.com/hashicorp/nomad/api" @@ -127,31 +124,12 @@ func (c *MonitorCommand) Run(args []string) int { eventDoneCh := make(chan struct{}) frames, errCh := client.Agent().Monitor(eventDoneCh, query) - select { - case err := <-errCh: + r, err := streamFrames(frames, errCh, -1, eventDoneCh) + if err != nil { c.Ui.Error(fmt.Sprintf("Error starting monitor: %s", err)) c.Ui.Error(commandErrorText(c)) return 1 - default: } - - // Create a reader - var r io.ReadCloser - frameReader := api.NewFrameReader(frames, errCh, eventDoneCh) - frameReader.SetUnblockTime(500 * time.Millisecond) - r = frameReader - 
- defer r.Close() - - signalCh := make(chan os.Signal, 1) - signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) - - go func() { - <-signalCh - // End the streaming - r.Close() - }() - _, err = io.Copy(os.Stdout, r) if err != nil { c.Ui.Error(fmt.Sprintf("error monitoring logs: %s", err)) diff --git a/command/agent_monitor_export.go b/command/agent_monitor_export.go new file mode 100644 index 000000000..b803f3101 --- /dev/null +++ b/command/agent_monitor_export.go @@ -0,0 +1,209 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/cli" + "github.com/hashicorp/nomad/api" + "github.com/posener/complete" +) + +type MonitorExportCommand struct { + Meta + + // Below this point is where CLI flag options are stored. + nodeID string + serverID string + onDisk bool + logsSince time.Duration + serviceName string + follow bool +} + +func (c *MonitorExportCommand) Help() string { + helpText := ` +Usage: nomad monitor export [options] + +Use the 'nomad monitor export' command to export an agent's historic data +from journald or its Nomad log file. If exporting journald logs, you must +pass '-service-name' with the name of the nomad service. +The '-logs-since' and '-follow' options are only valid for journald queries. +You may pass a duration string to the '-logs-since' option to override the +default 72h duration. Nomad will accept the following time units in the +'-logs-since duration string:"ns", "us" (or "µs"), "ms", "s", "m", "h". +The '-follow=true' option causes the agent to continue to stream logs until +interrupted or until the remote agent quits. Nomad only supports journald +queries on Linux. + +If you do not use Linux or you do not run Nomad as a systemd unit, pass the +'-on-disk=true' option to export the entirety of a given agent's nomad log file. 
+ +When ACLs are enabled, this command requires a token with the 'agent:read' +capability. + + +General Options: + + ` + generalOptionsUsage(usageOptsDefault|usageOptsNoNamespace) + ` + +Monitor Specific Options: + + -node-id + Sets the specific node to monitor. Accepts only a single node-id and cannot + be used with server-id. + + -server-id + Sets the specific server to monitor. Accepts only a single server-id and + cannot be used with node-id. + + -service-name + Sets the name of the nomad service, must match systemd conventions and + include the word 'nomad'. You may provide the full systemd file name + or omit the suffix. If your service name includes a '.', you must include + a valid suffix (e.g. nomad.client.service). + + -logs-since + Sets the journald log period, invalid if on-disk=true. Defaults to 72h. + Valid unit strings are "ns", "us" (or "µs"), "ms", "s", "m", "h". + + -follow + If set, the export command will continue streaming until interrupted. Ignored + if on-disk=true. + + -on-disk + If set, the export command will retrieve the Nomad log file defined in the + target agent's log_file configuration. 
+` + return strings.TrimSpace(helpText) +} + +func (c *MonitorExportCommand) Synopsis() string { + return "Stream logs from a Nomad agent" +} + +func (c *MonitorExportCommand) AutocompleteFlags() complete.Flags { + return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient), + complete.Flags{ + "-node-id": NodePredictor(c.Client), + "-server-id": ServerPredictor(c.Client), + "-service-name": complete.PredictSet("nomad"), + "-logs-since": complete.PredictNothing, + "-follow": complete.PredictNothing, + "-on-disk": complete.PredictNothing, + }) +} + +func (c *MonitorExportCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *MonitorExportCommand) Name() string { return "monitor export" } + +func (c *MonitorExportCommand) Run(args []string) int { + c.Ui = &cli.PrefixedUi{ + OutputPrefix: " ", + InfoPrefix: " ", + ErrorPrefix: "==> ", + Ui: c.Ui, + } + defaultDur := time.Hour * 72 + + flags := c.Meta.FlagSet(c.Name(), FlagSetClient) + flags.Usage = func() { c.Ui.Output(c.Help()) } + flags.StringVar(&c.nodeID, "node-id", "", "") + flags.StringVar(&c.serverID, "server-id", "", "") + flags.DurationVar(&c.logsSince, "logs-since", defaultDur, + `sets the journald log period. 
Defaults to 72h, valid unit strings are
+		"ns", "us" (or "µs"), "ms", "s", "m", or "h".`)
+	flags.StringVar(&c.serviceName, "service-name", "",
+		"the name of the systemd service unit to collect logs for, cannot be used with on-disk=true")
+	flags.BoolVar(&c.onDisk, "on-disk", false,
+		"directs the cli to stream the configured nomad log file, cannot be used with -service-name")
+	flags.BoolVar(&c.follow, "follow", false, "")
+
+	if err := flags.Parse(args); err != nil {
+		return 1
+	}
+
+	args = flags.Args()
+	if l := len(args); l != 0 {
+		c.Ui.Error("This command takes no arguments")
+		c.Ui.Error(commandErrorText(c))
+		return 1
+	}
+
+	if c.serviceName != "" && c.onDisk {
+		c.Ui.Error("Cannot target journalctl and nomad log file simultaneously")
+		c.Ui.Error(commandErrorText(c))
+	}
+
+	if c.serviceName != "" {
+		if isNomad := strings.Contains(c.serviceName, "nomad"); !isNomad {
+			c.Ui.Error(fmt.Sprintf("Invalid value: -service-name=%s does not include 'nomad'", c.serviceName))
+			c.Ui.Error(commandErrorText(c))
+		}
+	}
+
+	if c.serviceName == "" && !c.onDisk {
+		c.Ui.Error("One of -service-name or -on-disk must be set")
+	}
+	client, err := c.Meta.Client()
+	if err != nil {
+		c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
+		c.Ui.Error(commandErrorText(c))
+		return 1
+	}
+
+	// Query the node info and lookup prefix
+	if c.nodeID != "" {
+		c.nodeID, err = lookupNodeID(client.Nodes(), c.nodeID)
+		if err != nil {
+			c.Ui.Error(err.Error())
+			return 1
+		}
+	}
+
+	params := map[string]string{
+		"follow":       strconv.FormatBool(c.follow),
+		"logs_since":   c.logsSince.String(),
+		"node_id":      c.nodeID,
+		"on_disk":      strconv.FormatBool(c.onDisk),
+		"server_id":    c.serverID,
+		"service_name": c.serviceName,
+	}
+
+	query := &api.QueryOptions{
+		Params: params,
+	}
+
+	eventDoneCh := make(chan struct{})
+	frames, errCh := client.Agent().MonitorExport(eventDoneCh, query)
+	r, err := streamFrames(frames, errCh, -1, eventDoneCh)
+
+	if err != nil {
+		
c.Ui.Error(fmt.Sprintf("Error starting monitor: \n%s", err)) + c.Ui.Error(commandErrorText(c)) + return 1 + } + + n, err := io.Copy(os.Stdout, r) + if err != nil && err != io.EOF { + c.Ui.Error(fmt.Sprintf("Error monitoring logs: %s", err.Error())) + return 1 + } + + if n == 0 && err == nil { + emptyMessage := "Returned no data or errors, check your log_file configuration or service name" + c.Ui.Error(fmt.Sprintf("Error starting monitor: \n%s", emptyMessage)) + return 1 + } + return 0 +} diff --git a/command/agent_monitor_export_test.go b/command/agent_monitor_export_test.go new file mode 100644 index 000000000..fe7b8bbb8 --- /dev/null +++ b/command/agent_monitor_export_test.go @@ -0,0 +1,93 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "os" + "path/filepath" + "testing" + + "github.com/hashicorp/cli" + "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/command/agent" + "github.com/shoenig/test/must" +) + +func TestMonitorExportCommand_Implements(t *testing.T) { + ci.Parallel(t) + var _ cli.Command = &MonitorExportCommand{} +} + +func TestMonitorExportCommand_Fails(t *testing.T) { + const expectedText = "log log log log log" + + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.log") + must.NoError(t, os.WriteFile(testFile, []byte(expectedText), 0777)) + config := func(c *agent.Config) { + c.LogFile = testFile + } + + srv, _, url := testServer(t, false, config) + defer srv.Shutdown() + cases := []struct { + name string + cmdArgs []string + defaultErr bool + errString string + }{ + { + name: "misuse", + cmdArgs: []string{"some", "bad", "args"}, + defaultErr: true, + }, + { + name: "no address", + cmdArgs: []string{"-address=nope"}, + errString: "unsupported protocol scheme", + }, + { + name: "invalid follow boolean", + cmdArgs: []string{"-address=" + url, "-follow=maybe"}, + errString: `invalid boolean value "maybe" for -follow`, + }, + { + name: "invalid on-disk boolean", + 
cmdArgs: []string{"-address=" + url, "-on-disk=maybe"}, + errString: `invalid boolean value "maybe" for -on-disk`, + }, + { + name: "setting both on-disk and service-name", + cmdArgs: []string{"-address=" + url, "-on-disk=true", "-service-name=nomad"}, + errString: "journald and nomad log file simultaneously", + }, + { + name: "setting neither on-disk nor service-name", + cmdArgs: []string{"-address=" + url}, + errString: "One of -service-name or -on-disk must be set", + }, + { + name: "requires nomad in service name", + cmdArgs: []string{"-address=" + url, "-service-name=docker.path"}, + errString: "does not include 'nomad'", + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + ui := cli.NewMockUi() + cmd := &MonitorExportCommand{Meta: Meta{Ui: ui}} + + code := cmd.Run(tc.cmdArgs) + must.One(t, code) + + out := ui.ErrorWriter.String() + if tc.defaultErr { + must.StrContains(t, out, commandErrorText(cmd)) + } else { + must.StrContains(t, out, tc.errString) + } + }) + } +} diff --git a/command/alloc_fs.go b/command/alloc_fs.go index 8d01bd2e8..9386bea9a 100644 --- a/command/alloc_fs.go +++ b/command/alloc_fs.go @@ -8,9 +8,7 @@ import ( "io" "math/rand" "os" - "os/signal" "strings" - "syscall" "time" humanize "github.com/dustin/go-humanize" @@ -355,38 +353,19 @@ func (f *AllocFSCommand) Run(args []string) int { return 0 } -// followFile outputs the contents of the file to stdout relative to the end of -// the file. If numLines does not equal -1, then tail -n behavior is used. +// followFile calls the streamFrames helper to output the contents of the +// file to stdout relative to the end of the file. If numLines does not equal +// -1, then tail -n behavior is used. 
func (f *AllocFSCommand) followFile(client *api.Client, alloc *api.Allocation, path, origin string, offset, numLines int64) (io.ReadCloser, error) { cancel := make(chan struct{}) + frames, errCh := client.AllocFS().Stream(alloc, path, origin, offset, cancel, nil) - select { - case err := <-errCh: + r, err := streamFrames(frames, errCh, numLines, cancel) + if err != nil { return nil, err - default: } - signalCh := make(chan os.Signal, 1) - signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) - - // Create a reader - var r io.ReadCloser - frameReader := api.NewFrameReader(frames, errCh, cancel) - frameReader.SetUnblockTime(500 * time.Millisecond) - r = frameReader - - // If numLines is set, wrap the reader - if numLines != -1 { - r = NewLineLimitReader(r, int(numLines), int(numLines*bytesToLines), 1*time.Second) - } - - go func() { - <-signalCh - - // End the streaming - r.Close() - }() return r, nil } diff --git a/command/commands.go b/command/commands.go index 0766ea43f..d17b5c7ee 100644 --- a/command/commands.go +++ b/command/commands.go @@ -579,6 +579,11 @@ func Commands(metaPtr *Meta, agentUi cli.Ui) map[string]cli.CommandFactory { Meta: meta, }, nil }, + "monitor export": func() (cli.Command, error) { + return &MonitorExportCommand{ + Meta: meta, + }, nil + }, "namespace": func() (cli.Command, error) { return &NamespaceCommand{ Meta: meta, diff --git a/command/helpers.go b/command/helpers.go index e871d0cff..9f7e2af47 100644 --- a/command/helpers.go +++ b/command/helpers.go @@ -11,9 +11,11 @@ import ( "io" "maps" "os" + "os/signal" "path/filepath" "strconv" "strings" + "syscall" "time" "github.com/hashicorp/cli" @@ -782,3 +784,35 @@ func getByPrefix[T any]( return nil, objs, nil } } + +func streamFrames(frames <-chan *api.StreamFrame, errCh <-chan error, + numLines int64, cancel chan struct{}) (io.ReadCloser, error) { + + select { + case err := <-errCh: + return nil, err + default: + } + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, 
os.Interrupt, syscall.SIGTERM) + + // Create a reader + var r io.ReadCloser + frameReader := api.NewFrameReader(frames, errCh, cancel) + frameReader.SetUnblockTime(500 * time.Millisecond) + r = frameReader + + // If numLines is set, wrap the reader + if numLines != -1 { + r = NewLineLimitReader(r, int(numLines), int(numLines*bytesToLines), 1*time.Second) + } else { + } + go func() { + <-signalCh + + // End the streaming + r.Close() + }() + + return r, nil +} diff --git a/command/helpers_test.go b/command/helpers_test.go index 52cce0780..6f03ffcfd 100644 --- a/command/helpers_test.go +++ b/command/helpers_test.go @@ -722,3 +722,126 @@ func TestHelperGetByPrefix(t *testing.T) { } } + +// TestHelperStreamFrames tests the streamFrames command helper used +// by the agent_monitor and fs_alloc endpoints to populate a reader +// with data from the streamFrame channel passed to the function +func TestHelperStreamFrames(t *testing.T) { + const loopCount = 50 + + // Create test file + dir := t.TempDir() + f, err := os.CreateTemp(dir, "log") + must.NoError(t, err) + writeLine := []byte("[INFO]log log log made of wood you are heavy but so good\n") + writeLength := len(writeLine) + + for range loopCount { + _, _ = f.Write(writeLine) + } + f.Close() + + // Create test file reader for streaming + goldenFilePath := f.Name() + goldenFileContents, err := os.ReadFile(goldenFilePath) + must.NoError(t, err) + + fileReader, err := os.Open(goldenFilePath) + must.NoError(t, err) + + // Helper func to populate stream chan in test case + streamFunc := func() (chan *api.StreamFrame, chan error, chan struct{}) { + framesCh := make(chan *api.StreamFrame, 30) + errCh := make(chan error) + cancelCh := make(chan struct{}) + + offset := 0 + + r := io.LimitReader(fileReader, 64) + for { + bytesHolder := make([]byte, 64) + n, err := r.Read(bytesHolder) + if err != nil && err != io.EOF { + must.NoError(t, err) + } + + if n == 0 && err == io.EOF { + break + } + offset += n + framesCh <- 
&api.StreamFrame{ + Offset: int64(offset), + Data: goldenFileContents, + File: goldenFilePath, + } + + if n != 0 && err == io.EOF { + //break after sending if we hit EOF with bytes in buffer + break + } + } + + close(framesCh) + return framesCh, errCh, cancelCh + } + testErr := errors.New("isErr") + cases := []struct { + name string + numLines int + expectErr bool + err error + }{ + { + name: "happy_no_limit", + numLines: -1, + }, + { + name: "happy_limit", + numLines: 25, + }, + { + name: "error", + numLines: -1, + expectErr: true, + err: testErr, + }, + } + + for _, tc := range cases { + + t.Run(tc.name, func(t *testing.T) { + + framesCh, errCh, cancelCh := streamFunc() + + if tc.expectErr { + go func() { + time.Sleep(time.Nanosecond * 1) + errCh <- tc.err + }() + } + + r, err := streamFrames(framesCh, errCh, int64(tc.numLines), cancelCh) + if !tc.expectErr { + must.NoError(t, err) + } + + result, err := io.ReadAll(r) + if !tc.expectErr { + must.NoError(t, err) + } + if tc.numLines == -1 { + //expectedLength := writeLength * loopCount + must.Eq(t, + goldenFileContents, + result) + } else { + expectedLength := (writeLength * tc.numLines) + must.Eq(t, + expectedLength, + len(result)) + } + + r.Close() + }) + } +} diff --git a/nomad/client_agent_endpoint.go b/nomad/client_agent_endpoint.go index 1d63acfa2..1f545e48a 100644 --- a/nomad/client_agent_endpoint.go +++ b/nomad/client_agent_endpoint.go @@ -13,7 +13,7 @@ import ( "time" log "github.com/hashicorp/go-hclog" - + "github.com/hashicorp/go-msgpack/v2/codec" sframer "github.com/hashicorp/nomad/client/lib/streamframer" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/command/agent/host" @@ -21,8 +21,6 @@ import ( "github.com/hashicorp/nomad/command/agent/pprof" "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" - - "github.com/hashicorp/go-msgpack/v2/codec" ) type Agent struct { @@ -35,6 +33,7 @@ func NewAgentEndpoint(srv *Server) *Agent { func (a 
*Agent) register() { a.srv.streamingRpcs.Register("Agent.Monitor", a.monitor) + a.srv.streamingRpcs.Register("Agent.MonitorExport", a.monitorExport) } func (a *Agent) Profile(args *structs.AgentPprofRequest, reply *structs.AgentPprofResponse) error { @@ -64,7 +63,7 @@ func (a *Agent) Profile(args *structs.AgentPprofRequest, reply *structs.AgentPpr return fmt.Errorf("missing target RPC") } - if region != a.srv.config.Region { + if region != a.srv.Region() { // Mark that we are forwarding args.SetForwarded() return a.srv.forwardRegion(region, "Agent.Profile", args, reply) @@ -87,7 +86,7 @@ func (a *Agent) Profile(args *structs.AgentPprofRequest, reply *structs.AgentPpr } // This server is the target, so now we can check for AllowAgentDebug - if !aclObj.AllowAgentDebug(a.srv.config.EnableDebug) { + if !aclObj.AllowAgentDebug(a.srv.GetConfig().EnableDebug) { return structs.ErrPermissionDenied } @@ -168,17 +167,17 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { // Targeting a node, forward request to node if args.NodeID != "" { - a.forwardMonitorClient(conn, args, encoder, decoder) + a.forwardMonitorClient(conn, args, encoder, decoder, args.NodeID, "Agent.Monitor") // forwarded request has ended, return return } region := args.RequestRegion() if region == "" { - handleStreamResultError(fmt.Errorf("missing target RPC"), pointer.Of(int64(400)), encoder) + handleStreamResultError(fmt.Errorf("missing target region"), pointer.Of(int64(400)), encoder) return } - if region != a.srv.config.Region { + if region != a.srv.Region() { // Mark that we are forwarding args.SetForwarded() } @@ -191,7 +190,9 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { return } if serverToFwd != nil { - a.forwardMonitorServer(conn, serverToFwd, args, encoder, decoder) + // Empty ServerID to prevent forwarding loop + args.ServerID = "" + a.forwardMonitorServer(conn, serverToFwd, args, encoder, decoder, "Agent.Monitor") return } } @@ -200,7 +201,7 @@ func (a *Agent) monitor(conn 
io.ReadWriteCloser) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - monitor := monitor.New(512, a.srv.logger, &log.LoggerOptions{ + m := monitor.New(512, a.srv.logger, &log.LoggerOptions{ Level: logLevel, JSONFormat: args.LogJSON, IncludeLocation: args.LogIncludeLocation, @@ -225,8 +226,8 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { <-ctx.Done() }() - logCh := monitor.Start() - defer monitor.Stop() + logCh := m.Start() + defer m.Stop() initialOffset := int64(0) // receive logs and build frames @@ -248,48 +249,135 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { } } }() + streamEncoder := monitor.NewStreamEncoder(&buf, conn, encoder, frameCodec, args.PlainText) + streamErr := streamEncoder.EncodeStream(frames, errCh, ctx, framer, false) + if streamErr != nil { + handleStreamResultError(streamErr, pointer.Of(int64(500)), encoder) + return + } +} - var streamErr error -OUTER: - for { - select { - case frame, ok := <-frames: - if !ok { - // frame may have been closed when an error - // occurred. Check once more for an error. - select { - case streamErr = <-errCh: - // There was a pending error! 
- default: - // No error, continue on - } +func (a *Agent) monitorExport(conn io.ReadWriteCloser) { + defer conn.Close() + // Decode args + var args cstructs.MonitorExportRequest + decoder := codec.NewDecoder(conn, structs.MsgpackHandle) + encoder := codec.NewEncoder(conn, structs.MsgpackHandle) - break OUTER - } + if err := decoder.Decode(&args); err != nil { + handleStreamResultError(err, pointer.Of(int64(500)), encoder) + return + } + authErr := a.srv.Authenticate(nil, &args) + a.srv.MeasureRPCRate("agent", structs.RateMetricRead, &args) + if authErr != nil { + handleStreamResultError(structs.ErrPermissionDenied, nil, encoder) + return + } - var resp cstructs.StreamErrWrapper - if args.PlainText { - resp.Payload = frame.Data - } else { - if err := frameCodec.Encode(frame); err != nil { - streamErr = err - break OUTER - } + // Check agent read permissions + if aclObj, err := a.srv.ResolveACL(&args); err != nil { + handleStreamResultError(err, nil, encoder) + return + } else if !aclObj.AllowAgentRead() { + handleStreamResultError(structs.ErrPermissionDenied, pointer.Of(int64(403)), encoder) + return + } - resp.Payload = buf.Bytes() - buf.Reset() - } + // Targeting a node, forward request to node + if args.NodeID != "" { + a.forwardMonitorClient(conn, args, encoder, decoder, args.NodeID, "Agent.MonitorExport") + // forwarded request has ended, return + return + } - if err := encoder.Encode(resp); err != nil { - streamErr = err - break OUTER - } - encoder.Reset(conn) - case <-ctx.Done(): - break OUTER + region := args.RequestRegion() + if region == "" { + handleStreamResultError(fmt.Errorf("missing target region"), pointer.Of(int64(400)), encoder) + return + } + if region != a.srv.Region() { + // Mark that we are forwarding + args.SetForwarded() + } + + // Try to forward request to remote region/server + if args.ServerID != "" { + serverToFwd, err := a.forwardFor(args.ServerID, region) + if err != nil { + handleStreamResultError(err, pointer.Of(int64(400)), encoder) 
+ return + } + if serverToFwd != nil { + //empty args.ServerID to prevent forwarding loop + args.ServerID = "" + a.forwardMonitorServer(conn, serverToFwd, args, encoder, decoder, "Agent.MonitorExport") + return } } + nomadLogPath := a.srv.GetConfig().LogFile + if args.OnDisk && nomadLogPath == "" { + handleStreamResultError(errors.New("No nomad log file defined"), pointer.Of(int64(400)), encoder) + } + // NodeID was empty, ServerID was equal to this server, monitor this server + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + opts := monitor.MonitorExportOpts{ + Logger: a.srv.logger, + LogsSince: args.LogsSince, + ServiceName: args.ServiceName, + NomadLogPath: nomadLogPath, + OnDisk: args.OnDisk, + Follow: args.Follow, + Context: ctx, + } + + frames := make(chan *sframer.StreamFrame, 32) + errCh := make(chan error) + var buf bytes.Buffer + frameSize := 1024 + frameCodec := codec.NewEncoder(&buf, structs.JsonHandle) + + framer := sframer.NewStreamFramer(frames, 1*time.Second, 200*time.Millisecond, frameSize) + framer.Run() + defer framer.Destroy() + + // goroutine to detect remote side closing + go func() { + if _, err := conn.Read(nil); err != nil { + // One end of the pipe explicitly closed, exit + cancel() + return + } + <-ctx.Done() + }() + m, err := monitor.NewExportMonitor(opts) + if err != nil { + handleStreamResultError(err, pointer.Of(int64(500)), encoder) + return + } + + var eofCancelCh chan error + + streamCh := m.Start() + initialOffset := int64(0) + eofCancel := !opts.Follow + + streamEncoder := monitor.NewStreamEncoder(&buf, conn, encoder, frameCodec, args.PlainText) + // receive logs and build frames + streamReader := monitor.NewStreamReader(streamCh, framer, int64(frameSize)) + go func() { + defer framer.Destroy() + if err := streamReader.StreamFixed(ctx, initialOffset, "", 0, eofCancelCh, eofCancel); err != nil { + select { + case errCh <- err: + case <-ctx.Done(): + } + } + }() + + streamErr := 
streamEncoder.EncodeStream(frames, errCh, ctx, framer, true) if streamErr != nil { handleStreamResultError(streamErr, pointer.Of(int64(500)), encoder) return @@ -334,11 +422,10 @@ func (a *Agent) forwardFor(serverID, region string) (*serverParts, error) { return target, nil } -func (a *Agent) forwardMonitorClient(conn io.ReadWriteCloser, args cstructs.MonitorRequest, encoder *codec.Encoder, decoder *codec.Decoder) { +func (a *Agent) forwardMonitorClient(conn io.ReadWriteCloser, args any, encoder *codec.Encoder, decoder *codec.Decoder, nodeID string, endpoint string) { // Get the Connection to the client either by fowarding to another server // or creating direct stream - - state, srv, err := a.findClientConn(args.NodeID) + state, srv, err := a.findClientConn(nodeID) if err != nil { handleStreamResultError(err, pointer.Of(int64(500)), encoder) return @@ -347,7 +434,7 @@ func (a *Agent) forwardMonitorClient(conn io.ReadWriteCloser, args cstructs.Moni var clientConn net.Conn if state == nil { - conn, err := a.srv.streamingRpc(srv, "Agent.Monitor") + conn, err := a.srv.streamingRpc(srv, endpoint) if err != nil { handleStreamResultError(err, nil, encoder) return @@ -355,7 +442,7 @@ func (a *Agent) forwardMonitorClient(conn io.ReadWriteCloser, args cstructs.Moni clientConn = conn } else { - stream, err := NodeStreamingRpc(state.Session, "Agent.Monitor") + stream, err := NodeStreamingRpc(state.Session, endpoint) if err != nil { handleStreamResultError(err, nil, encoder) return @@ -374,10 +461,7 @@ func (a *Agent) forwardMonitorClient(conn io.ReadWriteCloser, args cstructs.Moni structs.Bridge(conn, clientConn) } -func (a *Agent) forwardMonitorServer(conn io.ReadWriteCloser, server *serverParts, args cstructs.MonitorRequest, encoder *codec.Encoder, decoder *codec.Decoder) { - // empty ServerID to prevent forwarding loop - args.ServerID = "" - +func (a *Agent) forwardMonitorServer(conn io.ReadWriteCloser, server *serverParts, args any, encoder *codec.Encoder, decoder 
*codec.Decoder, endpoint string) { serverConn, err := a.srv.streamingRpc(server, "Agent.Monitor") if err != nil { handleStreamResultError(err, pointer.Of(int64(500)), encoder) @@ -439,7 +523,7 @@ func (a *Agent) Host(args *structs.HostDataRequest, reply *structs.HostDataRespo return fmt.Errorf("missing target RPC") } - if region != a.srv.config.Region { + if region != a.srv.Region() { // Mark that we are forwarding args.SetForwarded() return a.srv.forwardRegion(region, "Agent.Host", args, reply) diff --git a/nomad/client_agent_endpoint_test.go b/nomad/client_agent_endpoint_test.go index 3dcaa9ef7..9896c9f74 100644 --- a/nomad/client_agent_endpoint_test.go +++ b/nomad/client_agent_endpoint_test.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "net" + "os" "strings" "testing" "time" @@ -21,6 +22,7 @@ import ( "github.com/hashicorp/nomad/client/config" sframer "github.com/hashicorp/nomad/client/lib/streamframer" cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/command/agent/monitor" "github.com/hashicorp/nomad/command/agent/pprof" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -1023,3 +1025,86 @@ func TestAgentHost_ACLDebugRequired(t *testing.T) { err := s.RPC("Agent.Host", &req, &resp) must.EqError(t, err, structs.ErrPermissionDenied.Error()) } + +func TestMonitor_MonitorExport(t *testing.T) { + ci.Parallel(t) + const ( + shortText = "log log log log log" + ) + // Create test file + dir := t.TempDir() + f, err := os.CreateTemp(dir, "log") + must.NoError(t, err) + for range 1000 { + _, _ = f.WriteString(fmt.Sprintf("%v [INFO] it's log, it's log, it's big it's heavy it's wood", time.Now())) + } + f.Close() + longFilePath := f.Name() + longFileContents, err := os.ReadFile(longFilePath) + must.NoError(t, err) + + // start server + s, root, cleanupS := TestACLServer(t, func(c *Config) { + c.LogFile = longFilePath + }) + defer cleanupS() + defer os.Remove(longFilePath) + testutil.WaitForLeader(t, s.RPC) + + cases 
:= []struct { + name string + expected string + nomadLogPath string + serviceName string + token *structs.ACLToken + onDisk bool + expectErr bool + }{ + { + name: "happy_path_long_file", + onDisk: true, + expected: string(longFileContents), + token: root, + }, + { + name: "token_error", + onDisk: true, + expected: string(longFileContents), + token: &structs.ACLToken{}, + expectErr: true, + }, + { + name: "invalid_service_name", + serviceName: "nomad$", + expected: string(longFileContents), + token: &structs.ACLToken{}, + expectErr: true, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + + // No NodeID set to force server use + req := cstructs.MonitorExportRequest{ + LogsSince: "72", + NomadLogPath: tc.nomadLogPath, + OnDisk: tc.onDisk, + + ServiceName: tc.serviceName, + QueryOptions: structs.QueryOptions{ + Region: "global", + AuthToken: tc.token.SecretID, + }, + } + + builder, finalError := monitor.ExportMonitorClient_TestHelper(req, s, time.After(3*time.Second)) + if tc.expectErr { + must.Error(t, finalError) + return + } + must.NoError(t, err) + must.NotNil(t, builder) + must.Eq(t, strings.TrimSpace(tc.expected), strings.TrimSpace(builder.String())) + }) + } +} diff --git a/nomad/config.go b/nomad/config.go index c1e1c969e..a0b2d6887 100644 --- a/nomad/config.go +++ b/nomad/config.go @@ -448,6 +448,9 @@ type Config struct { // considered healthy. Without this, the server can hang indefinitely // waiting for these. 
StartTimeout time.Duration + + // LogFile is used by MonitorExport to stream a server's log file + LogFile string `hcl:"log_file"` } func (c *Config) Copy() *Config { diff --git a/website/content/commands/monitor/export.mdx b/website/content/commands/monitor/export.mdx new file mode 100644 index 000000000..42feb818e --- /dev/null +++ b/website/content/commands/monitor/export.mdx @@ -0,0 +1,79 @@ +--- +layout: docs +page_title: 'nomad monitor export command reference' +description: | + The `nomad monitor export` command returns logs written to disk or journald by a nomad agent. +--- + +# `nomad monitor export` command reference + +The `nomad monitor export` command returns logs written to disk or journald by a nomad agent. + +## Usage + +```plaintext +nomad monitor export [options] +``` + +Use the `nomad monitor export` command to export an agent's historic data +from journald or its Nomad log file. If exporting journald logs, you must +pass `-service-name` with the name of the systemd unit to query. +The `-logs-since` and `-follow` options are only valid for journald queries. +You may pass a duration string to the `-logs-since` option to override the +default 72h duration. Nomad will accept the following time units in the +`-logs-since` duration string: "ns", "us" (or "µs"), "ms", "s", "m", "h". +The `-follow=true` option causes the agent to continue to stream logs until +interrupted or until the remote agent quits. Nomad only supports journald +queries on Linux. + +If you do not use Linux or you do not run Nomad as a systemd unit, pass the +`-on-disk=true` option to export the entirety of a given agent's nomad log file. + +When ACLs are enabled, this command requires a token with the `agent:read` +capability. + +## Options + +- `-node-id`: Specifies the client node-id to stream logs from. If no + node-id is given, the Nomad server from the `-address` flag is used. + +- `-server-id`: Specifies the Nomad server id to stream logs from. 
Accepts
+ server names from `nomad server members` and also a special `leader` option
+ which will target the current leader.
+
+- `-service-name`: Specifies the name of the systemd unit for export.
+ Do not use with `-on-disk`. Must include 'nomad' and conform to systemd
+ naming conventions. You may provide the full systemd file name
+ or omit the suffix. If your service name includes a '.', you must include
+ a valid suffix (e.g. nomad.client.service).
+
+- `-logs-since`: Duration used to determine how far back to return logs from
+ journald. Ignored if used with `-on-disk` and defaults to `72h` if not set.
+
+- `-follow`: Boolean that, if true, continues streaming journald logs until
+ interrupted. Do not use with `-on-disk`.
+
+- `-on-disk`: Boolean that, if true, returns the contents of the Nomad log file
+ defined in the agent config.
+
+## Examples
+
+This example returns journald log entries with a specific node ID and service name.
+
+```shell-session
+$ nomad monitor export -node-id=$(nomad node status --quiet) -service-name="nomad"
+Jun 04 20:09:29 nomad-client01 systemd[1]: Starting Nomad...
+Subject: A start job for unit nomad_client.service has begun execution
+```
+
+This example returns the contents of the nomad log file for a specific server. 
+ +```shell-session +$ nomad monitor export -server-id=a57b2adb-1a30-2dda-8df0-25abb0881952 -on-disk=true +2025-06-20T12:22:08.528-0500 [DEBUG] http: request complete: method=GET path=/v1/agent/health?type=server duration=1.445739ms +2025-06-20T12:22:09.892-0500 [DEBUG] nomad: memberlist: Stream connection from=127.0.0.1:53628 +``` + +## General options + +@include 'general_options_no_namespace.mdx' diff --git a/website/content/commands/monitor.mdx b/website/content/commands/monitor/index.mdx similarity index 100% rename from website/content/commands/monitor.mdx rename to website/content/commands/monitor/index.mdx diff --git a/website/data/commands-nav-data.json b/website/data/commands-nav-data.json index 172f0503d..8d03b1365 100644 --- a/website/data/commands-nav-data.json +++ b/website/data/commands-nav-data.json @@ -412,7 +412,15 @@ }, { "title": "monitor", - "path": "monitor" + "routes": [ + { + "title": "Overview", + "path": "monitor" + }, + { "title": "export", + "path": "monitor/export" + } + ] }, { "title": "namespace", From 7790dd1c6550d8ddb1af84d09aa96b478f3c510e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Aug 2025 09:50:12 +0200 Subject: [PATCH 20/27] chore(deps): bump github.com/aws/aws-sdk-go-v2/config (#26412) Bumps [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) from 1.29.18 to 1.30.2. - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.29.18...v1.30.2) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-version: 1.30.2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 22 +++++++++++----------- go.sum | 44 ++++++++++++++++++++++---------------------- 2 files changed, 33 insertions(+), 33 deletions(-) diff --git a/go.mod b/go.mod index 00cd7db81..b205e3dcc 100644 --- a/go.mod +++ b/go.mod @@ -16,8 +16,8 @@ require ( github.com/Masterminds/sprig/v3 v3.3.0 github.com/Microsoft/go-winio v0.6.2 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e - github.com/aws/aws-sdk-go-v2/config v1.29.18 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33 + github.com/aws/aws-sdk-go-v2/config v1.30.2 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.1 github.com/aws/smithy-go v1.22.5 github.com/container-storage-interface/spec v1.11.0 github.com/containerd/errdefs v1.0.0 @@ -183,18 +183,18 @@ require ( github.com/armon/go-metrics v0.4.1 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go v1.55.6 // indirect - github.com/aws/aws-sdk-go-v2 v1.36.6 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.71 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.37 // indirect + github.com/aws/aws-sdk-go-v2 v1.37.1 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0 // indirect github.com/aws/aws-sdk-go-v2/service/ecs v1.53.8 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.18 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.25.6 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc 
v1.30.4 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.34.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.26.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.35.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bgentry/speakeasy v0.1.0 // indirect diff --git a/go.sum b/go.sum index d5fa946bf..4db4b8f0e 100644 --- a/go.sum +++ b/go.sum @@ -730,34 +730,34 @@ github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk= github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.36.6 h1:zJqGjVbRdTPojeCGWn5IR5pbJwSQSBh5RWFTQcEQGdU= -github.com/aws/aws-sdk-go-v2 v1.36.6/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0= -github.com/aws/aws-sdk-go-v2/config v1.29.18 h1:x4T1GRPnqKV8HMJOMtNktbpQMl3bIsfx8KbqmveUO2I= -github.com/aws/aws-sdk-go-v2/config v1.29.18/go.mod h1:bvz8oXugIsH8K7HLhBv06vDqnFv3NsGDt2Znpk7zmOU= -github.com/aws/aws-sdk-go-v2/credentials v1.17.71 h1:r2w4mQWnrTMJjOyIsZtGp3R3XGY3nqHn8C26C2lQWgA= -github.com/aws/aws-sdk-go-v2/credentials v1.17.71/go.mod h1:E7VF3acIup4GB5ckzbKFrCK0vTvEQxOxgdq4U3vcMCY= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33 h1:D9ixiWSG4lyUBL2DDNK924Px9V/NBVpML90MHqyTADY= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33/go.mod h1:caS/m4DI+cij2paz3rtProRBI4s/+TCiWoaWZuQ9010= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37 h1:osMWfm/sC/L4tvEdQ65Gri5ZZDCUpuYJZbTTDrsn4I0= 
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37/go.mod h1:ZV2/1fbjOPr4G4v38G3Ww5TBT4+hmsK45s/rxu1fGy0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.37 h1:v+X21AvTb2wZ+ycg1gx+orkB/9U6L7AOp93R7qYxsxM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.37/go.mod h1:G0uM1kyssELxmJ2VZEfG0q2npObR3BAkF3c1VsfVnfs= +github.com/aws/aws-sdk-go-v2 v1.37.1 h1:SMUxeNz3Z6nqGsXv0JuJXc8w5YMtrQMuIBmDx//bBDY= +github.com/aws/aws-sdk-go-v2 v1.37.1/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= +github.com/aws/aws-sdk-go-v2/config v1.30.2 h1:YE1BmSc4fFYqFgN1mN8uzrtc7R9x+7oSWeX8ckoltAw= +github.com/aws/aws-sdk-go-v2/config v1.30.2/go.mod h1:UNrLGZ6jfAVjgVJpkIxjLufRJqTXCVYOpkeVf83kwBo= +github.com/aws/aws-sdk-go-v2/credentials v1.18.2 h1:mfm0GKY/PHLhs7KO0sUaOtFnIQ15Qqxt+wXbO/5fIfs= +github.com/aws/aws-sdk-go-v2/credentials v1.18.2/go.mod h1:v0SdJX6ayPeZFQxgXUKw5RhLpAoZUuynxWDfh8+Eknc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.1 h1:owmNBboeA0kHKDcdF8KiSXmrIuXZustfMGGytv6OMkM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.1/go.mod h1:Bg1miN59SGxrZqlP8vJZSmXW+1N8Y1MjQDq1OfuNod8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1 h1:ksZXBYv80EFTcgc8OJO48aQ8XDWXIQL7gGasPeCoTzI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1/go.mod h1:HSksQyyJETVZS7uM54cir0IgxttTD+8aEoJMPGepHBI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1 h1:+dn/xF/05utS7tUhjIcndbuaPjfll2LhbH1cCDGLYUQ= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1/go.mod h1:hyAGz30LHdm5KBZDI58MXx5lDVZ5CUfvfTZvMu4HCZo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0 h1:3hH6o7Z2WeE1twvz44Aitn6Qz8DZN3Dh5IB4Eh2xq7s= github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0/go.mod h1:I76S7jN0nfsYTBtuTgTsJtK2Q8yJVDgrLr5eLN64wMA= 
github.com/aws/aws-sdk-go-v2/service/ecs v1.53.8 h1:v1OectQdV/L+KSFSiqK00fXGN8FbaljRfNFysmWB8D0= github.com/aws/aws-sdk-go-v2/service/ecs v1.53.8/go.mod h1:F0DbgxpvuSvtYun5poG67EHLvci4SgzsMVO6SsPUqKk= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 h1:CXV68E2dNqhuynZJPB80bhPQwAKqBWVer887figW6Jc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4/go.mod h1:/xFi9KtvBXP97ppCz1TAEvU1Uf66qvid89rbem3wCzQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.18 h1:vvbXsA2TVO80/KT7ZqCbx934dt6PY+vQ8hZpUZ/cpYg= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.18/go.mod h1:m2JJHledjBGNMsLOF1g9gbAxprzq3KjC8e4lxtn+eWg= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.6 h1:rGtWqkQbPk7Bkwuv3NzpE/scwwL9sC1Ul3tn9x83DUI= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.6/go.mod h1:u4ku9OLv4TO4bCPdxf4fA1upaMaJmP9ZijGk3AAOC6Q= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4 h1:OV/pxyXh+eMA0TExHEC4jyWdumLxNbzz1P0zJoezkJc= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4/go.mod h1:8Mm5VGYwtm+r305FfPSuc+aFkrypeylGYhFim6XEPoc= -github.com/aws/aws-sdk-go-v2/service/sts v1.34.1 h1:aUrLQwJfZtwv3/ZNG2xRtEen+NqI3iesuacjP51Mv1s= -github.com/aws/aws-sdk-go-v2/service/sts v1.34.1/go.mod h1:3wFBZKoWnX3r+Sm7in79i54fBmNfwhdNdQuscCw7QIk= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1 h1:ky79ysLMxhwk5rxJtS+ILd3Mc8kC5fhsLBrP27r6h4I= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1/go.mod h1:+2MmkvFvPYM1vsozBWduoLJUi5maxFk5B7KJFECujhY= +github.com/aws/aws-sdk-go-v2/service/sso v1.26.1 h1:uWaz3DoNK9MNhm7i6UGxqufwu3BEuJZm72WlpGwyVtY= +github.com/aws/aws-sdk-go-v2/service/sso v1.26.1/go.mod h1:ILpVNjL0BO+Z3Mm0SbEeUoYS9e0eJWV1BxNppp0fcb8= 
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.1 h1:XdG6/o1/ZDmn3wJU5SRAejHaWgKS4zHv0jBamuKuS2k= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.1/go.mod h1:oiotGTKadCOCl3vg/tYh4k45JlDF81Ka8rdumNhEnIQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.35.1 h1:iF4Xxkc0H9c/K2dS0zZw3SCkj0Z7n6AMnUiiyoJND+I= +github.com/aws/aws-sdk-go-v2/service/sts v1.35.1/go.mod h1:0bxIatfN0aLq4mjoLDeBpOjOke68OsFlXPDFJ7V0MYw= github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= From 57e7f8f28db565f7e62ce4292c872a7b138b811f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Aug 2025 09:51:06 +0200 Subject: [PATCH 21/27] chore(deps): bump github.com/prometheus/client_golang (#26413) Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.22.0 to 1.23.0. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.22.0...v1.23.0) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-version: 1.23.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index b205e3dcc..e8e3f6717 100644 --- a/go.mod +++ b/go.mod @@ -113,7 +113,7 @@ require ( github.com/opencontainers/runc v1.2.6 github.com/opencontainers/runtime-spec v1.2.1 github.com/posener/complete v1.2.3 - github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_golang v1.23.0 github.com/prometheus/common v0.65.0 github.com/rs/cors v1.11.1 github.com/ryanuber/columnize v2.1.2+incompatible @@ -297,7 +297,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/procfs v0.16.1 // indirect github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect diff --git a/go.sum b/go.sum index 4db4b8f0e..d6856cec6 100644 --- a/go.sum +++ b/go.sum @@ -1492,8 +1492,8 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod 
h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1516,8 +1516,8 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= From 7ed9d168aef1cc685f6167d8a028c4c487f440ad Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Aug 2025 10:21:57 +0200 Subject: [PATCH 22/27] chore(deps): bump github.com/hashicorp/go-set/v3 from 3.0.0 to 3.0.1 (#26414) Bumps [github.com/hashicorp/go-set/v3](https://github.com/hashicorp/go-set) from 3.0.0 to 3.0.1. 
- [Release notes](https://github.com/hashicorp/go-set/releases) - [Commits](https://github.com/hashicorp/go-set/compare/v3.0.0...v3.0.1) --- updated-dependencies: - dependency-name: github.com/hashicorp/go-set/v3 dependency-version: 3.0.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e8e3f6717..58d7a941a 100644 --- a/go.mod +++ b/go.mod @@ -71,7 +71,7 @@ require ( github.com/hashicorp/go-plugin v1.6.3 github.com/hashicorp/go-secure-stdlib/listenerutil v0.1.10 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 - github.com/hashicorp/go-set/v3 v3.0.0 + github.com/hashicorp/go-set/v3 v3.0.1 github.com/hashicorp/go-sockaddr v1.0.7 github.com/hashicorp/go-syslog v1.0.0 github.com/hashicorp/go-uuid v1.0.3 diff --git a/go.sum b/go.sum index d6856cec6..763213e8b 100644 --- a/go.sum +++ b/go.sum @@ -1215,8 +1215,8 @@ github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.3 h1:xbrxd0U9XQW8qL1BAz2XrAjA github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.3/go.mod h1:LWq2Sy8UoKKuK4lFuCNWSjJj57MhNNf2zzBWMtkAIX4= github.com/hashicorp/go-set/v2 v2.1.0 h1:iERPCQWks+I+4bTgy0CT2myZsCqNgBg79ZHqwniohXo= github.com/hashicorp/go-set/v2 v2.1.0/go.mod h1:6q4nh8UCVZODn2tJ5RbJi8+ki7pjZBsAEYGt6yaGeTo= -github.com/hashicorp/go-set/v3 v3.0.0 h1:CaJBQvQCOWoftrBcDt7Nwgo0kdpmrKxar/x2o6pV9JA= -github.com/hashicorp/go-set/v3 v3.0.0/go.mod h1:IEghM2MpE5IaNvL+D7X480dfNtxjRXZ6VMpK3C8s2ok= +github.com/hashicorp/go-set/v3 v3.0.1 h1:ZwO15ZYmIrFYL9zSm2wBuwcRiHxVdp46m/XA/MUlM6I= +github.com/hashicorp/go-set/v3 v3.0.1/go.mod h1:0oPQqhtitglZeT2ZiWnRIfUG6gJAHnn7LzrS7SbgNY4= github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= github.com/hashicorp/go-sockaddr v1.0.7/go.mod 
h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= From 8eaf7b80ee71b957296323588cfd5242e71446f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Aug 2025 10:30:27 +0200 Subject: [PATCH 23/27] chore(deps): bump github.com/golang-jwt/jwt/v5 from 5.2.3 to 5.3.0 (#26416) Bumps [github.com/golang-jwt/jwt/v5](https://github.com/golang-jwt/jwt) from 5.2.3 to 5.3.0. - [Release notes](https://github.com/golang-jwt/jwt/releases) - [Changelog](https://github.com/golang-jwt/jwt/blob/main/VERSION_HISTORY.md) - [Commits](https://github.com/golang-jwt/jwt/compare/v5.2.3...v5.3.0) --- updated-dependencies: - dependency-name: github.com/golang-jwt/jwt/v5 dependency-version: 5.3.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 58d7a941a..a1bb8c021 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/elazarl/go-bindata-assetfs v1.0.1 github.com/fatih/color v1.18.0 github.com/go-jose/go-jose/v3 v3.0.4 - github.com/golang-jwt/jwt/v5 v5.2.3 + github.com/golang-jwt/jwt/v5 v5.3.0 github.com/golang/protobuf v1.5.4 github.com/golang/snappy v1.0.0 github.com/google/go-cmp v0.7.0 diff --git a/go.sum b/go.sum index 763213e8b..c56c6e4e0 100644 --- a/go.sum +++ b/go.sum @@ -976,8 +976,8 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.2.3 
h1:kkGXqQOBSDDWRhWNXTFpqGSCMyh/PLnqUvMGJPDJDs0= -github.com/golang-jwt/jwt/v5 v5.2.3/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= From 9859f4a140d8cda1ca0de0dd562fe5001ff9380d Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 4 Aug 2025 12:07:27 -0400 Subject: [PATCH 24/27] document version check requirement on Raft message types (#26411) Whenever we add a new Raft message type, we almost always need to add a new version check to ensure that leaders aren't trying to write unknown Raft entries to older followers. Leave a note about this where the edits happen to reduce the risk of this unfortunately common bug. Ref: https://github.com/hashicorp/nomad-enterprise/pull/2973 --- nomad/structs/structs.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 0693427ef..847764f77 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -140,6 +140,9 @@ const ( // NOTE: MessageTypes are shared between CE and ENT. If you need to add a // new type, check that ENT is not already using that value. + // + // NOTE: Adding a new MessageType above? You need to have a version check + // for the feature to avoid panics during upgrades. ) const ( From 7c633f81097e1805e06e7f8e7ef7a67f462e28b8 Mon Sep 17 00:00:00 2001 From: Daniel Bennett Date: Mon, 4 Aug 2025 13:58:35 -0400 Subject: [PATCH 25/27] exec: don't panic on rootless raw_exec tasks (#26401) the executor dies, leaving an orphaned process still running. 
the panic fix: * don't `panic()` * and return an empty, but non-nil, func on cgroup error feature fix: * allow non-root agent to proceed with exec when cgroups are off --- .changelog/26401.txt | 3 ++ drivers/shared/executor/executor.go | 8 +++- drivers/shared/executor/executor_linux.go | 3 +- .../executor/executor_universal_linux.go | 48 +++++++++++-------- 4 files changed, 38 insertions(+), 24 deletions(-) create mode 100644 .changelog/26401.txt diff --git a/.changelog/26401.txt b/.changelog/26401.txt new file mode 100644 index 000000000..29dfafc4c --- /dev/null +++ b/.changelog/26401.txt @@ -0,0 +1,3 @@ +```release-note:bug +alloc exec: Fixed executor panic when exec-ing a rootless raw_exec task +``` diff --git a/drivers/shared/executor/executor.go b/drivers/shared/executor/executor.go index 66af5c29f..7834d1023 100644 --- a/drivers/shared/executor/executor.go +++ b/drivers/shared/executor/executor.go @@ -5,6 +5,7 @@ package executor import ( "context" + "errors" "fmt" "io" "os" @@ -49,6 +50,9 @@ var ( // The statistics the basic executor exposes ExecutorBasicMeasuredMemStats = []string{"RSS", "Swap"} ExecutorBasicMeasuredCpuStats = []string{"System Mode", "User Mode", "Percent"} + + // ErrCgroupMustBeSet occurs if a cgroup is not provided when expected + ErrCgroupMustBeSet = errors.New("cgroup must be set") ) // Executor is the interface which allows a driver to launch and supervise @@ -441,7 +445,7 @@ func (e *UniversalExecutor) Exec(deadline time.Time, name string, args []string) defer cancel() if cleanup, err := e.setSubCmdCgroup(&e.childCmd, e.command.StatsCgroup()); err != nil { - return nil, 0, err + return nil, 0, fmt.Errorf("Exec: %w", err) } else { defer cleanup() } @@ -533,7 +537,7 @@ func (e *UniversalExecutor) ExecStreaming(ctx context.Context, command []string, } cgroup := e.command.StatsCgroup() if cleanup, err := e.setSubCmdCgroup(cmd, cgroup); err != nil { - return err + return fmt.Errorf("ExecStreaming: %w", err) } else { defer cleanup() } diff 
--git a/drivers/shared/executor/executor_linux.go b/drivers/shared/executor/executor_linux.go index fffc18b53..84fc3cc54 100644 --- a/drivers/shared/executor/executor_linux.go +++ b/drivers/shared/executor/executor_linux.go @@ -7,7 +7,6 @@ package executor import ( "context" - "errors" "fmt" "io" "os" @@ -766,7 +765,7 @@ func (l *LibcontainerExecutor) configureCgroups(cfg *runc.Config, command *ExecC cg := command.StatsCgroup() if cg == "" { - return errors.New("cgroup must be set") + return fmt.Errorf("configureCgroups: %w", ErrCgroupMustBeSet) } // // set the libcontainer hook for writing the PID to cgroup.procs file diff --git a/drivers/shared/executor/executor_universal_linux.go b/drivers/shared/executor/executor_universal_linux.go index 9fdb0d7cd..ed4e2fb12 100644 --- a/drivers/shared/executor/executor_universal_linux.go +++ b/drivers/shared/executor/executor_universal_linux.go @@ -30,27 +30,32 @@ const ( // setSubCmdCgroup sets the cgroup for non-Task child processes of the // executor.Executor (since in cg2 it lives outside the task's cgroup) func (e *UniversalExecutor) setSubCmdCgroup(cmd *exec.Cmd, cgroup string) (func(), error) { + + // no extra setup needed for cg v1 or when cgroups are "off" + switch cgroupslib.GetMode() { + case cgroupslib.OFF, cgroupslib.CG1: + return func() {}, nil + default: + // continue for cg v2 + } + if cgroup == "" { - panic("cgroup must be set") + return nil, fmt.Errorf("error setting up exec subcommand: %w", ErrCgroupMustBeSet) + } + + fd, cleanup, err := e.statCG(cgroup) + if err != nil { + return nil, err } // make sure attrs struct has been set if cmd.SysProcAttr == nil { cmd.SysProcAttr = new(syscall.SysProcAttr) } + cmd.SysProcAttr.UseCgroupFD = true + cmd.SysProcAttr.CgroupFD = fd - switch cgroupslib.GetMode() { - case cgroupslib.CG2: - fd, cleanup, err := e.statCG(cgroup) - if err != nil { - return nil, err - } - cmd.SysProcAttr.UseCgroupFD = true - cmd.SysProcAttr.CgroupFD = fd - return cleanup, nil - default: - 
return func() {}, nil - } + return cleanup, nil } func (e *UniversalExecutor) ListProcesses() set.Collection[procstats.ProcessID] { @@ -93,11 +98,8 @@ func (e *UniversalExecutor) configureResourceContainer( ) (runningFunc, cleanupFunc, error) { cgroup := command.StatsCgroup() - // ensure tasks get the desired oom_score_adj value set - if err := e.setOomAdj(command.OOMScoreAdj); err != nil { - return nil, nil, err - } - + // we specify these return funcs as empty but non-nil, + // because callers may call them even if this function errors. // deleteCgroup will be called after the task has been launched // v1: remove the executor process from the task's cgroups // v2: let go of the file descriptor of the task's cgroup @@ -106,6 +108,11 @@ func (e *UniversalExecutor) configureResourceContainer( moveProcess = func() error { return nil } ) + // ensure tasks get the desired oom_score_adj value set + if err := e.setOomAdj(command.OOMScoreAdj); err != nil { + return moveProcess, deleteCgroup, err + } + // manually configure cgroup for cpu / memory constraints switch cgroupslib.GetMode() { case cgroupslib.CG1: @@ -113,9 +120,10 @@ func (e *UniversalExecutor) configureResourceContainer( return moveProcess, deleteCgroup, err } moveProcess, deleteCgroup = e.enterCG1(cgroup, command.CpusetCgroup()) + case cgroupslib.OFF: - deleteCgroup = func() {} - moveProcess = func() error { return nil } + // do nothing + default: e.configureCG2(cgroup, command) // configure child process to spawn in the cgroup From 21841d3067dfe742ca147b02a3bc70c5a03531a7 Mon Sep 17 00:00:00 2001 From: tehut Date: Mon, 4 Aug 2025 13:55:25 -0700 Subject: [PATCH 26/27] Add historical journald and log export flags to operator debug command (#26410) * Add -log-file-export and -log-lookback commands to add historical log to debug capture * use monitor.PrepFile() helper for other historical log tests --- .changelog/26410.txt | 3 + client/agent_endpoint_test.go | 9 +- command/agent/monitor/monitor_test.go | 9 +- 
command/agent/monitor/stream_helpers_test.go | 23 +--- command/agent/monitor/test_helpers.go | 25 ++++ command/operator_debug.go | 93 +++++++++++++- command/operator_debug_test.go | 123 +++++++++++++++++++ nomad/client_agent_endpoint_test.go | 9 +- website/content/commands/operator/debug.mdx | 13 ++ 9 files changed, 259 insertions(+), 48 deletions(-) create mode 100644 .changelog/26410.txt diff --git a/.changelog/26410.txt b/.changelog/26410.txt new file mode 100644 index 000000000..5f26d2982 --- /dev/null +++ b/.changelog/26410.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command: Add historical log capture to `nomad operator debug` command with `-log-lookback` and `-log-file-export` flags +``` diff --git a/client/agent_endpoint_test.go b/client/agent_endpoint_test.go index 3eb845bd1..99ce3a25e 100644 --- a/client/agent_endpoint_test.go +++ b/client/agent_endpoint_test.go @@ -454,14 +454,7 @@ func TestMonitor_MonitorExport(t *testing.T) { ci.Parallel(t) // Create test file - dir := t.TempDir() - f, err := os.CreateTemp(dir, "log") - must.NoError(t, err) - for range 1000 { - _, _ = f.WriteString(fmt.Sprintf("%v [INFO] it's log, it's log, it's big it's heavy it's wood", time.Now())) - } - f.Close() - testFilePath := f.Name() + testFilePath := monitor.PrepFile(t).Name() testFileContents, err := os.ReadFile(testFilePath) must.NoError(t, err) diff --git a/command/agent/monitor/monitor_test.go b/command/agent/monitor/monitor_test.go index 32a108411..8385a80dd 100644 --- a/command/agent/monitor/monitor_test.go +++ b/command/agent/monitor/monitor_test.go @@ -100,14 +100,7 @@ func TestMonitor_Export(t *testing.T) { expectedText = "log log log log log" ) - dir := t.TempDir() - f, err := os.CreateTemp(dir, "log") - must.NoError(t, err) - for range 1000 { - _, _ = f.WriteString(fmt.Sprintf("%v [INFO] it's log, it's log, it's big it's heavy it's wood", time.Now())) - } - f.Close() - goldenFilePath := f.Name() + goldenFilePath := PrepFile(t).Name() goldenFileContents, err 
:= os.ReadFile(goldenFilePath) must.NoError(t, err) diff --git a/command/agent/monitor/stream_helpers_test.go b/command/agent/monitor/stream_helpers_test.go index 52bb38c9c..ab0a63b45 100644 --- a/command/agent/monitor/stream_helpers_test.go +++ b/command/agent/monitor/stream_helpers_test.go @@ -17,27 +17,6 @@ import ( "github.com/shoenig/test/must" ) -var writeLine = []byte("[INFO] log log log made of wood you are heavy but so good\n") - -func prepFile(t *testing.T) *os.File { - const loopCount = 10 - // Create test file to read from - dir := t.TempDir() - f, err := os.CreateTemp(dir, "log") - must.NoError(t, err) - - for range loopCount { - _, _ = f.Write(writeLine) - } - f.Close() - - // Create test file reader for stream set up - goldenFilePath := f.Name() - fileReader, err := os.Open(goldenFilePath) - must.NoError(t, err) - return fileReader -} - func TestClientStreamReader_StreamFixed(t *testing.T) { ci.Parallel(t) @@ -80,7 +59,7 @@ func TestClientStreamReader_StreamFixed(t *testing.T) { } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - file := prepFile(t) + file := PrepFile(t) goldenFileContents, err := os.ReadFile(file.Name()) must.NoError(t, err) diff --git a/command/agent/monitor/test_helpers.go b/command/agent/monitor/test_helpers.go index ab5b80ddd..c3b12b0b6 100644 --- a/command/agent/monitor/test_helpers.go +++ b/command/agent/monitor/test_helpers.go @@ -6,15 +6,19 @@ package monitor import ( "encoding/json" "errors" + "fmt" "io" "net" + "os" "strings" + "testing" "time" "github.com/hashicorp/go-msgpack/v2/codec" sframer "github.com/hashicorp/nomad/client/lib/streamframer" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/nomad/structs" + "github.com/shoenig/test/must" ) // StreamingClient is an interface that implements the StreamingRpcHandler function @@ -22,6 +26,27 @@ type StreamingClient interface { StreamingRpcHandler(string) (structs.StreamingRpcHandler, error) } +var writeLine = 
[]byte(fmt.Sprintf("[INFO] log log log made of wood you are heavy but so good, %v\n", time.Now())) + +func PrepFile(t *testing.T) *os.File { + const loopCount = 100 + // Create test file to read from + dir := t.TempDir() + f, err := os.CreateTemp(dir, "log") + must.NoError(t, err) + + for range loopCount { + _, _ = f.Write(writeLine) + } + f.Close() + + // Create test file reader for stream set up + goldenFilePath := f.Name() + fileReader, err := os.Open(goldenFilePath) + must.NoError(t, err) + return fileReader +} + // ExportMonitorClient_TestHelper consolidates streaming test setup for use in // client and server RPChandler tests func ExportMonitorClient_TestHelper(req cstructs.MonitorExportRequest, c StreamingClient, diff --git a/command/operator_debug.go b/command/operator_debug.go index 0bdb9ac9c..61af3e280 100644 --- a/command/operator_debug.go +++ b/command/operator_debug.go @@ -47,6 +47,8 @@ type OperatorDebugCommand struct { pprofDuration time.Duration logLevel string logIncludeLocation bool + logLookback time.Duration + logFileExport bool maxNodes int nodeClass string nodeIDs []string @@ -183,6 +185,21 @@ Debug Options: Include file and line information in each log line monitored. The default is true. + -log-file-export= + Include the contents of agents' Nomad logfiles in the debug capture. The + log export monitor runs concurrently with the log monitor and ignores the + -log-level and -log-include-location flags used to configure that monitor. + Nomad returns an error if the agent does not have file logging configured. + Cannot be used with -log-lookback. + + -log-lookback= + Include historical journald logs in the debug capture. The journald + export monitor runs concurrently with the log monitor and ignores the + -log-level and -log-include-location flags used to configure that monitor. + This flag is only available on Linux systems using systemd. 
Refer to the + -log-file-export flag to retrieve historical logs on non-Linux systems, or + those without systemd. Cannot be used with -log-file-export. + -max-nodes= Cap the maximum number of client nodes included in the capture. Defaults to 10, set to 0 for unlimited. @@ -353,8 +370,7 @@ func (c *OperatorDebugCommand) Name() string { return "debug" } func (c *OperatorDebugCommand) Run(args []string) int { flags := c.Meta.FlagSet(c.Name(), FlagSetClient) flags.Usage = func() { c.Ui.Output(c.Help()) } - - var duration, interval, pprofInterval, output, pprofDuration, eventTopic string + var duration, interval, pprofInterval, output, pprofDuration, eventTopic, logLookback string var eventIndex int64 var nodeIDs, serverIDs string var allowStale bool @@ -365,6 +381,8 @@ func (c *OperatorDebugCommand) Run(args []string) int { flags.StringVar(&interval, "interval", "30s", "") flags.StringVar(&c.logLevel, "log-level", "TRACE", "") flags.BoolVar(&c.logIncludeLocation, "log-include-location", true, "") + flags.StringVar(&logLookback, "log-lookback", "", "") + flags.BoolVar(&c.logFileExport, "log-file-export", false, "") flags.IntVar(&c.maxNodes, "max-nodes", 10, "") flags.StringVar(&c.nodeClass, "node-class", "", "") flags.StringVar(&nodeIDs, "node-id", "all", "") @@ -400,6 +418,19 @@ func (c *OperatorDebugCommand) Run(args []string) int { return 1 } + // Parse the logLookback duration + l, err := time.ParseDuration(logLookback) + if err != nil && logLookback != "" { + c.Ui.Error(fmt.Sprintf("Error parsing -log-lookback: %s: %s", logLookback, err.Error())) + return 1 + } + c.logLookback = l + + if c.logLookback != 0 && c.logFileExport { + c.Ui.Error("Error parsing inputs, -log-file-export and -log-lookback cannot be used together.") + return 1 + } + // Parse the capture duration d, err := time.ParseDuration(duration) if err != nil { @@ -753,6 +784,16 @@ func (c *OperatorDebugCommand) mkdir(paths ...string) error { // startMonitors starts go routines for each node and client 
func (c *OperatorDebugCommand) startMonitors(client *api.Client) { + // if requested, start monitor export first + if c.logLookback != 0 || c.logFileExport { + for _, id := range c.nodeIDs { + go c.startMonitorExport(clientDir, "node_id", id, client) + } + + for _, id := range c.serverIDs { + go c.startMonitorExport(serverDir, "server_id", id, client) + } + } for _, id := range c.nodeIDs { go c.startMonitor(clientDir, "node_id", id, client) } @@ -801,6 +842,54 @@ func (c *OperatorDebugCommand) startMonitor(path, idKey, nodeID string, client * } } +// startMonitor starts one monitor api request, writing to a file. It blocks and should be +// called in a go routine. Errors are ignored, we want to build the archive even if a node +// is unavailable +func (c *OperatorDebugCommand) startMonitorExport(path, idKey, nodeID string, client *api.Client) { + monitorExportPath := "monitor_export.log" + qo := api.QueryOptions{ + Params: map[string]string{ + idKey: nodeID, + "on_disk": strconv.FormatBool(c.logFileExport), + "logs_since": c.logLookback.String(), + }, + AllowStale: c.queryOpts().AllowStale, + } + + // serviceName and onDisk cannot be set together, only set servicename if we're sure + // loglookback is set and logFileExport is false + if lookback := c.logLookback.String(); lookback != "" && !c.logFileExport { + qo.Params["service_name"] = "nomad" + } + + // prepare output location + c.mkdir(path, nodeID) + fh, err := os.Create(c.path(path, nodeID, monitorExportPath)) + if err != nil { + return + } + defer fh.Close() + + outCh, errCh := client.Agent().MonitorExport(c.ctx.Done(), &qo) + for { + select { + case out := <-outCh: + if out == nil { + continue + } + fh.Write(out.Data) + + case err := <-errCh: + if err != io.EOF { + fh.WriteString(fmt.Sprintf("monitor: %s\n", err.Error())) + return + } + case <-c.ctx.Done(): + return + } + } +} + // captureEventStream wraps the event stream capture process. 
func (c *OperatorDebugCommand) startEventStream(client *api.Client) { c.verboseOut("Launching eventstream goroutine...") diff --git a/command/operator_debug_test.go b/command/operator_debug_test.go index fa7640dfa..ae7d8989c 100644 --- a/command/operator_debug_test.go +++ b/command/operator_debug_test.go @@ -22,10 +22,12 @@ import ( "github.com/hashicorp/nomad/ci" clienttest "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/command/agent" + mon "github.com/hashicorp/nomad/command/agent/monitor" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/testutil" + "github.com/shoenig/test/must" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -1070,3 +1072,124 @@ func extractArchiveName(captureOutput string) string { return file } + +func TestDebug_MonitorExportFiles(t *testing.T) { + f := mon.PrepFile(t).Name() + setLogFile := func(c *agent.Config) { + c.LogFile = f + } + srv, _, url := testServer(t, true, setLogFile) + testutil.WaitForLeader(t, srv.Agent.RPC) + logFileContents, err := os.ReadFile(f) + must.NoError(t, err) + serverNodeName := srv.Config.NodeName + region := srv.Config.Region + serverName := fmt.Sprintf("%s.%s", serverNodeName, region) + clientID := srv.Agent.Client().NodeID() + testutil.WaitForClient(t, srv.Agent.Client().RPC, clientID, srv.Agent.Client().Region()) + + testDir := t.TempDir() + defer os.Remove(testDir) + + duration := 2 * time.Second + interval := 750 * time.Millisecond + waitTime := 2 * duration + + baseArgs := []string{ + "-address", url, + "-output", testDir, + "-server-id", serverName, + "-node-id", clientID, + "-duration", duration.String(), + "-interval", interval.String(), + } + + cases := []struct { + name string + cmdArgs []string + errString string + runErr bool + wantExporter bool + }{ + { + name: "exporter", + cmdArgs: []string{"-log-file-export"}, + wantExporter: true, + }, + { 
+ name: "no_exporter", + wantExporter: false, + }, + { + name: "bad_value_for_log_lookback", + cmdArgs: []string{"-log-lookback", "blue"}, + errString: "Error parsing -log-lookback", + runErr: true, + wantExporter: false, + }, + { + name: "set_both_flags", + cmdArgs: []string{ + "-log-lookback", "5h", + "-log-file-export", + }, + errString: "Error parsing inputs, -log-file-export and -log-lookback cannot be used together", + runErr: true, + wantExporter: false, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + clientFiles := []string{ + "monitor.log", + "monitor_export.log", + } + args := baseArgs + if len(tc.cmdArgs) > 0 { + args = append(args, tc.cmdArgs...) + } + + serverFiles := []string{ + "monitor.log", + "monitor_export.log", + } + ui := cli.NewMockUi() + cmd := &OperatorDebugCommand{Meta: Meta{Ui: ui}} + + code := cmd.Run(args) + if tc.runErr { + must.One(t, code) + must.StrContains(t, ui.ErrorWriter.String(), tc.errString) + return + } else { + must.Zero(t, code) + } + + // Wait until client's monitor.log file is written + clientPaths := buildPathSlice(cmd.path(clientDir, clientID), clientFiles) + t.Logf("Waiting for client files in path: %s", clientDir) + + testutil.WaitForFilesUntil(t, clientPaths[:0], waitTime) + + // Wait until server's monitor.log file is written + serverPaths := buildPathSlice(cmd.path(serverDir, serverName), serverFiles) + t.Logf("Waiting for server files in path: %s", serverDir) + testutil.WaitForFilesUntil(t, serverPaths[:0], waitTime) + + // Validate historical log files exist and match expected value + clientLog, clientReadErr := os.ReadFile(clientPaths[1]) + serverLog, serverReadErr := os.ReadFile(serverPaths[1]) + if tc.wantExporter { + must.NoError(t, clientReadErr) + must.NoError(t, serverReadErr) + // Verify monitor export file contents as expected + must.Eq(t, logFileContents, serverLog) + must.Eq(t, logFileContents, clientLog) + } else { + must.NotNil(t, clientReadErr) + must.NotNil(t, 
serverReadErr) + } + + }) + } +} diff --git a/nomad/client_agent_endpoint_test.go b/nomad/client_agent_endpoint_test.go index 9896c9f74..91832ce92 100644 --- a/nomad/client_agent_endpoint_test.go +++ b/nomad/client_agent_endpoint_test.go @@ -1032,14 +1032,7 @@ func TestMonitor_MonitorExport(t *testing.T) { shortText = "log log log log log" ) // Create test file - dir := t.TempDir() - f, err := os.CreateTemp(dir, "log") - must.NoError(t, err) - for range 1000 { - _, _ = f.WriteString(fmt.Sprintf("%v [INFO] it's log, it's log, it's big it's heavy it's wood", time.Now())) - } - f.Close() - longFilePath := f.Name() + longFilePath := monitor.PrepFile(t).Name() longFileContents, err := os.ReadFile(longFilePath) must.NoError(t, err) diff --git a/website/content/commands/operator/debug.mdx b/website/content/commands/operator/debug.mdx index 0ac62b1f5..5fec856d0 100644 --- a/website/content/commands/operator/debug.mdx +++ b/website/content/commands/operator/debug.mdx @@ -54,6 +54,19 @@ true. - `-log-include-location`: Include file and line information in each log line monitored. The default is `true`. +- `log-file-export`: Include agents' Nomad logfiles in the debug capture. + The historical log export monitor runs concurrently with the log monitor + and ignores the `-log-level` and `-log-include-location` flags used to + configure that monitor. Nomad will return an error if the agent does not + have file logging configured. Cannot be used with `-log-lookback`. + +- `log-lookback`: Include historical journald logs in the debug capture. The + journald export monitor runs concurrently with the log monitor and ignores + the `-log-level` and `-log-include-location` flags passed to that monitor. + This flag is only available on Linux systems using systemd, see the + `-log-file-export` flag to retrieve historical logs from non-Linux systems, + or those without systemd. Cannot be used with `-log-file-export`. 
+ - `-max-nodes=`: Cap the maximum number of client nodes included in the capture. Defaults to 10, set to 0 for unlimited. From 8f74807891371aa8c7903d88b7ce44fce05c3fc8 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 4 Aug 2025 17:03:21 -0400 Subject: [PATCH 27/27] tests: fix conflict from parallelism in state store variables test (#26426) The state store test for Variables check-and-set behavior for deletes uses the same state store for a set of parallel tests. But one of the tests overlaps another by using the same path, and this can cause spurious test failures by hitting the CAS conflict error. This overlap doesn't appear to be intentional, so change the test to use a different path. Also cleaned up some unused test helpers in the same file. --- nomad/state/state_store_variables_test.go | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/nomad/state/state_store_variables_test.go b/nomad/state/state_store_variables_test.go index 1847197de..39b8d982e 100644 --- a/nomad/state/state_store_variables_test.go +++ b/nomad/state/state_store_variables_test.go @@ -4,7 +4,6 @@ package state import ( - "encoding/json" "errors" "sort" "strings" @@ -735,22 +734,6 @@ func TestStateStore_ListVariablesByKeyID(t *testing.T) { must.Eq(t, 5, count) } -func printVariable(tsv *structs.VariableEncrypted) string { - b, _ := json.Marshal(tsv) - return string(b) -} - -func printVariables(tsvs []*structs.VariableEncrypted) string { - if len(tsvs) == 0 { - return "" - } - var out strings.Builder - for _, tsv := range tsvs { - out.WriteString(printVariable(tsv) + "\n") - } - return out.String() -} - // TestStateStore_Variables_DeleteCAS func TestStateStore_Variables_DeleteCAS(t *testing.T) { ci.Parallel(t) @@ -822,7 +805,7 @@ func TestStateStore_Variables_DeleteCAS(t *testing.T) { t.Run("real_locked_var-cas_0", func(t *testing.T) { ci.Parallel(t) sv := mock.VariableEncrypted() - sv.Path = "real_var/cas_0" + sv.Path = "real_locked_var/cas_0" resp := 
ts.VarSet(10, &structs.VarApplyStateRequest{ Op: structs.VarOpSet, Var: sv,