ci: Update golangci-lint to v2 and fix highlighted issues. (#26334)

James Rasell
2025-07-25 11:44:08 +02:00
committed by GitHub
parent 2ef837f02f
commit 5989d5862a
41 changed files with 275 additions and 275 deletions

View File

@@ -1,8 +1,8 @@
 # Copyright (c) HashiCorp, Inc.
 # SPDX-License-Identifier: BUSL-1.1

+version: "2"
 run:
-  # Timeout for analysis.
   timeout: 10m
   # Modules download mode (do not modify go.mod)
@@ -11,82 +11,71 @@ run:
   # Exclude test files
   tests: false
-  # Skip ui and generated files
-issues:
-  exclude-files:
-    - ".*\\.generated\\.go$"
-    - ".*bindata_assetfs\\.go$"
-  skip-dirs:
-    - ui
-# Output configuration options
 output:
   formats:
-    - format: colored-line-number
+    text:
       path: stdout
+      print-linter-name: true
+      print-issued-lines: true
-  # print lines of code with issue, default is true
-  print-issued-lines: true
-  # print linter name in the end of issue text, default is true
-  print-linter-name: true
-# all available settings of specific linters
-linters-settings:
-  errcheck:
-    # report about not checking of errors in type assetions: `a := b.(MyStruct)`;
-    # default is false: such cases aren't reported by default.
-    check-type-assertions: false
-    exclude-functions:
-      - io.*
-      - fmt.*
-    # path to a file containing a list of functions to exclude from checking
-    # see https://github.com/kisielk/errcheck#excluding-functions for details
-    # exclude: /path/to/file.txt
-  govet:
-    # report about shadowed variables
-    disable:
-      - shadow
-  gofmt:
-    # simplify code: gofmt with `-s` option, true by default
-    simplify: true
-  gocritic:
-    disabled-checks:
-      - commentFormatting
-      - deprecatedComment
-  staticcheck:
-    # I(jrasell) will work on enabling additional checks when possible.
-    checks: ["ST1020", "ST1016"]
-issues:
-  exclude:
-    - ifElseChain
-    - singleCaseSwitch
-    - assignOp
-    - unlambda
 linters:
-  disable-all: true
+  default: none
   enable:
-    - goimports
-    - gocritic
-    - misspell
-    - govet
-    - ineffassign
-    - unconvert
-    - gofmt
-    - gosimple
-    - staticcheck
     - asasalint
     - asciicheck
     - bidichk
    - bodyclose
+    - copyloopvar
     - dogsled
     - durationcheck
-    # - errchkjson (todo)
-    # - errorlint (todo)
-    - copyloopvar
+    - gocritic
+    - govet
+    - ineffassign
+    - misspell
+    - staticcheck
+    - unconvert
     - usestdlibvars
-  fast: false
+  settings:
+    errcheck:
+      # report about not checking of errors in type assetions: `a := b.(MyStruct)`;
+      # default is false: such cases aren't reported by default.
+      check-type-assertions: false
+      exclude-functions:
+        - io.*
+        - fmt.*
+    gocritic:
+      disabled-checks:
+        - commentFormatting
+        - deprecatedComment
+    govet:
+      disable:
+        - shadow
+    staticcheck:
+      checks:
+        - ST1016
+        - ST1020
+  exclusions:
+    rules:
+      - path: (.+)\.go$
+        text: ifElseChain
+      - path: (.+)\.go$
+        text: singleCaseSwitch
+      - path: (.+)\.go$
+        text: assignOp
+      - path: (.+)\.go$
+        text: unlambda
+    paths:
+      - ".*\\.generated\\.go$"
+      - ".*bindata_assetfs\\.go$"
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  settings:
+    gofmt:
+      simplify: true
+  exclusions:
+    paths:
+      - ".*\\.generated\\.go$"
+      - ".*bindata_assetfs\\.go$"

View File

@@ -143,7 +143,7 @@ deps: ## Install build and development dependencies
 .PHONY: lint-deps
 lint-deps: ## Install linter dependencies
 	@echo "==> Updating linter dependencies..."
-	go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.64.5
+	go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.3.0
 	go install github.com/client9/misspell/cmd/misspell@v0.3.4
 	go install github.com/hashicorp/go-hclog/hclogvet@feaf6d2ec20fd895e711195c99e3fde93a68afc5
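
Editor's note on the new install path: Go's semantic import versioning puts major versions v2 and above into the module path itself, so moving from v1.64.5 to v2.3.0 changes the path as well as the version tag. A sketch of the pattern with a placeholder module (the example.com paths are illustrative, not real):

package main

import (
	_ "example.com/somelib"    // major version 0 or 1: bare module path
	_ "example.com/somelib/v2" // major version 2 and up: /v2 suffix in the path
)

func main() {}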

View File

@@ -156,7 +156,7 @@ func (a *Agent) Members() (*ServerMembers, error) {
 	return resp, nil
 }

-// Members is used to query all of the known server members
+// MembersOpts is used to query all of the known server members
 // with the ability to set QueryOptions
 func (a *Agent) MembersOpts(opts *QueryOptions) (*ServerMembers, error) {
 	var resp *ServerMembers

View File

@@ -102,10 +102,11 @@ func (v *CSIVolumes) Create(vol *CSIVolume, w *WriteOptions) ([]*CSIVolume, *Wri
 	return resp.Volumes, meta, err
 }

-// DEPRECATED: will be removed in Nomad 1.4.0
 // Delete deletes a CSI volume from an external storage provider. The ID
 // passed as an argument here is for the storage provider's ID, so a volume
 // that's already been deregistered can be deleted.
+//
+// Deprecated: will be removed in Nomad 1.4.0
 func (v *CSIVolumes) Delete(externalVolID string, w *WriteOptions) error {
 	_, err := v.client.delete(fmt.Sprintf("/v1/volume/csi/%v/delete", url.PathEscape(externalVolID)), nil, nil, w)
 	return err
@@ -184,8 +185,9 @@ func (v *CSIVolumes) ListSnapshotsOpts(req *CSISnapshotListRequest) (*CSISnapsho
 	return resp, qm, nil
 }

-// DEPRECATED: will be removed in Nomad 1.4.0
 // ListSnapshots lists external storage volume snapshots.
+//
+// Deprecated: will be removed in Nomad 1.4.0
 func (v *CSIVolumes) ListSnapshots(pluginID string, secrets string, q *QueryOptions) (*CSISnapshotListResponse, *QueryMeta, error) {
 	var resp *CSISnapshotListResponse
@@ -269,26 +271,26 @@ func (o *CSIMountOptions) Merge(p *CSIMountOptions) {
 // API or in Nomad's logs.
 type CSISecrets map[string]string

-func (q *QueryOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
+func (o *QueryOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
 	pairs := []string{}
 	for k, v := range secrets {
 		pairs = append(pairs, fmt.Sprintf("%v=%v", k, v))
 	}
-	if q.Headers == nil {
-		q.Headers = map[string]string{}
+	if o.Headers == nil {
+		o.Headers = map[string]string{}
 	}
-	q.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
+	o.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
 }

-func (w *WriteOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
+func (o *WriteOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
 	pairs := []string{}
 	for k, v := range secrets {
 		pairs = append(pairs, fmt.Sprintf("%v=%v", k, v))
 	}
-	if w.Headers == nil {
-		w.Headers = map[string]string{}
+	if o.Headers == nil {
+		o.Headers = map[string]string{}
 	}
-	w.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
+	o.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
 }

 // CSIVolume is used for serialization, see also nomad/structs/csi.go
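
Editor's note: the comment moves above follow Go's machine-readable deprecation convention, a paragraph beginning with "Deprecated:" at the end of the doc comment, which tools such as staticcheck and gopls recognize, unlike a leading all-caps "DEPRECATED:" line. A minimal sketch with hypothetical names:

package example

// ListThings lists things from the backend.
//
// Deprecated: use ListThingsOpts instead; ListThings will be removed in a
// future release.
func ListThings() []string { return nil }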

View File

@@ -12,8 +12,8 @@ import (
"strings" "strings"
) )
// Checks the current executable, then $GOPATH/bin, and finally the CWD, in that // NomadExecutable checks the current executable, then $GOPATH/bin, and finally
// order. If it can't be found, an error is returned. // the CWD, in that order. If it can't be found, an error is returned.
func NomadExecutable() (string, error) { func NomadExecutable() (string, error) {
nomadExe := "nomad" nomadExe := "nomad"
if runtime.GOOS == "windows" { if runtime.GOOS == "windows" {

View File

@@ -176,7 +176,7 @@ func (j *Jobs) List(q *QueryOptions) ([]*JobListStub, *QueryMeta, error) {
 	return j.ListOptions(nil, q)
 }

-// List is used to list all of the existing jobs.
+// ListOptions is used to list all of the existing jobs.
 func (j *Jobs) ListOptions(opts *JobListOptions, q *QueryOptions) ([]*JobListStub, *QueryMeta, error) {
 	var resp []*JobListStub

View File

@@ -126,7 +126,7 @@ func (n *Nodes) UpdateDrain(nodeID string, spec *DrainSpec, markEligible bool, q
 	return resp, err
 }

-// UpdateDrainWithMeta is used to update the drain strategy for a given node. If
+// UpdateDrainOpts is used to update the drain strategy for a given node. If
 // markEligible is true and the drain is being removed, the node will be marked
 // as having its scheduling being eligible
 func (n *Nodes) UpdateDrainOpts(nodeID string, opts *DrainOptions, q *WriteOptions) (*NodeDrainUpdateResponse,
@@ -478,7 +478,7 @@ func (n *Nodes) GC(nodeID string, q *QueryOptions) error {
 	return err
 }

-// TODO Add tests
+// GcAlloc - TODO Add tests
 func (n *Nodes) GcAlloc(allocID string, q *QueryOptions) error {
 	path := fmt.Sprintf("/v1/client/allocation/%s/gc", allocID)
 	_, err := n.client.query(path, nil, q)

View File

@@ -51,7 +51,7 @@ func (q *Quotas) ListUsage(qo *QueryOptions) ([]*QuotaUsage, *QueryMeta, error)
 	return resp, qm, nil
 }

-// PrefixList is used to do a PrefixList search over quota usages
+// PrefixListUsage is used to do a PrefixList search over quota usages
 func (q *Quotas) PrefixListUsage(prefix string, qo *QueryOptions) ([]*QuotaUsage, *QueryMeta, error) {
 	if qo == nil {
 		qo = &QueryOptions{Prefix: prefix}

View File

@@ -181,8 +181,10 @@ type NetworkResource struct {
 	CNI *CNIConfig `hcl:"cni,block"`
 }

+// Megabits should not be used.
+//
 // COMPAT(0.13)
-// XXX Deprecated. Please do not use. The method will be removed in Nomad
+// Deprecated. Please do not use. The method will be removed in Nomad
 // 0.13 and is only being kept to allow any references to be removed before
 // then.
 func (n *NetworkResource) Megabits() int {

View File

@@ -310,14 +310,14 @@ func (r *ReschedulePolicy) Copy() *ReschedulePolicy {
 	return nrp
 }

-func (p *ReschedulePolicy) String() string {
-	if p == nil {
+func (r *ReschedulePolicy) String() string {
+	if r == nil {
 		return ""
 	}
-	if *p.Unlimited {
-		return fmt.Sprintf("unlimited with %v delay, max_delay = %v", *p.DelayFunction, *p.MaxDelay)
+	if *r.Unlimited {
+		return fmt.Sprintf("unlimited with %v delay, max_delay = %v", *r.DelayFunction, *r.MaxDelay)
 	}
-	return fmt.Sprintf("%v in %v with %v delay, max_delay = %v", *p.Attempts, *p.Interval, *p.DelayFunction, *p.MaxDelay)
+	return fmt.Sprintf("%v in %v with %v delay, max_delay = %v", *r.Attempts, *r.Interval, *r.DelayFunction, *r.MaxDelay)
 }

 // Spread is used to serialize task group allocation spread preferences
@@ -664,7 +664,7 @@ func (g *TaskGroup) Constrain(c *Constraint) *TaskGroup {
 	return g
 }

-// AddMeta is used to add a meta k/v pair to a task group
+// SetMeta is used to add a meta k/v pair to a task group
 func (g *TaskGroup) SetMeta(key, val string) *TaskGroup {
 	if g.Meta == nil {
 		g.Meta = make(map[string]string)
@@ -697,7 +697,7 @@ func (g *TaskGroup) AddSpread(s *Spread) *TaskGroup {
 	return g
 }

-// AddSpread is used to add a new spread preference to a task group.
+// ScalingPolicy is used to add a new scaling policy to a task group.
 func (g *TaskGroup) ScalingPolicy(sp *ScalingPolicy) *TaskGroup {
 	g.Scaling = sp
 	return g
@@ -751,7 +751,7 @@ type TaskLifecycle struct {
 	Sidecar bool `mapstructure:"sidecar" hcl:"sidecar,optional"`
 }

-// Determine if lifecycle has user-input values
+// Empty determines if lifecycle has user-input values
 func (l *TaskLifecycle) Empty() bool {
 	return l == nil
 }
@@ -1048,7 +1048,7 @@ func NewTask(name, driver string) *Task {
 	}
 }

-// Configure is used to configure a single k/v pair on
+// SetConfig is used to configure a single k/v pair on
 // the task.
 func (t *Task) SetConfig(key string, val interface{}) *Task {
 	if t.Config == nil {
@@ -1073,7 +1073,7 @@ func (t *Task) Require(r *Resources) *Task {
 	return t
 }

-// Constraint adds a new constraints to a single task.
+// Constrain adds a new constraints to a single task.
 func (t *Task) Constrain(c *Constraint) *Task {
 	t.Constraints = append(t.Constraints, c)
 	return t

View File

@@ -173,17 +173,17 @@ func NewAllocDir(logger hclog.Logger, clientAllocDir, clientMountsDir, allocID s
 }

 // NewTaskDir creates a new TaskDir and adds it to the AllocDirs TaskDirs map.
-func (d *AllocDir) NewTaskDir(task *structs.Task) *TaskDir {
-	d.mu.Lock()
-	defer d.mu.Unlock()
+func (a *AllocDir) NewTaskDir(task *structs.Task) *TaskDir {
+	a.mu.Lock()
+	defer a.mu.Unlock()

 	secretsSize := 0
 	if task.Resources != nil {
 		secretsSize = task.Resources.SecretsMB
 	}

-	td := d.newTaskDir(task.Name, secretsSize)
-	d.TaskDirs[task.Name] = td
+	td := a.newTaskDir(task.Name, secretsSize)
+	a.TaskDirs[task.Name] = td
 	return td
 }
@@ -193,13 +193,13 @@ func (d *AllocDir) NewTaskDir(task *structs.Task) *TaskDir {
 // Since a valid tar may have been written even when an error occurs, a special
 // file "NOMAD-${ALLOC_ID}-ERROR.log" will be appended to the tar with the
 // error message as the contents.
-func (d *AllocDir) Snapshot(w io.Writer) error {
-	d.mu.RLock()
-	defer d.mu.RUnlock()
+func (a *AllocDir) Snapshot(w io.Writer) error {
+	a.mu.RLock()
+	defer a.mu.RUnlock()

-	allocDataDir := filepath.Join(d.SharedDir, SharedDataDir)
+	allocDataDir := filepath.Join(a.SharedDir, SharedDataDir)
 	rootPaths := []string{allocDataDir}
-	for _, taskdir := range d.TaskDirs {
+	for _, taskdir := range a.TaskDirs {
 		rootPaths = append(rootPaths, taskdir.LocalDir)
 	}
@@ -213,7 +213,7 @@ func (d *AllocDir) Snapshot(w io.Writer) error {
 		// Include the path of the file name relative to the alloc dir
 		// so that we can put the files in the right directories
-		relPath, err := filepath.Rel(d.AllocDir, path)
+		relPath, err := filepath.Rel(a.AllocDir, path)
 		if err != nil {
 			return err
 		}
@@ -256,14 +256,14 @@ func (d *AllocDir) Snapshot(w io.Writer) error {
 	// directories in the archive
 	for _, path := range rootPaths {
 		if err := filepath.Walk(path, walkFn); err != nil {
-			allocID := filepath.Base(d.AllocDir)
+			allocID := filepath.Base(a.AllocDir)
 			if writeErr := writeError(tw, allocID, err); writeErr != nil {
 				// This could be bad; other side won't know
 				// snapshotting failed. It could also just mean
 				// the snapshotting side closed the connect
 				// prematurely and won't try to use the tar
 				// anyway.
-				d.logger.Warn("snapshotting failed and unable to write error marker", "error", writeErr)
+				a.logger.Warn("snapshotting failed and unable to write error marker", "error", writeErr)
 			}
 			return fmt.Errorf("failed to snapshot %s: %w", path, err)
 		}
@@ -273,20 +273,20 @@ func (d *AllocDir) Snapshot(w io.Writer) error {
 }

 // Move other alloc directory's shared path and local dir to this alloc dir.
-func (d *AllocDir) Move(other Interface, tasks []*structs.Task) error {
-	d.mu.RLock()
-	if !d.built {
+func (a *AllocDir) Move(other Interface, tasks []*structs.Task) error {
+	a.mu.RLock()
+	if !a.built {
 		// Enforce the invariant that Build is called before Move
-		d.mu.RUnlock()
-		return fmt.Errorf("unable to move to %q - alloc dir is not built", d.AllocDir)
+		a.mu.RUnlock()
+		return fmt.Errorf("unable to move to %q - alloc dir is not built", a.AllocDir)
 	}

 	// Moving is slow and only reads immutable fields, so unlock during heavy IO
-	d.mu.RUnlock()
+	a.mu.RUnlock()

 	// Move the data directory
 	otherDataDir := filepath.Join(other.ShareDirPath(), SharedDataDir)
-	dataDir := filepath.Join(d.SharedDir, SharedDataDir)
+	dataDir := filepath.Join(a.SharedDir, SharedDataDir)
 	if fileInfo, err := os.Stat(otherDataDir); fileInfo != nil && err == nil {
 		os.Remove(dataDir) // remove an empty data dir if it exists
 		if err := os.Rename(otherDataDir, dataDir); err != nil {
@@ -302,7 +302,7 @@ func (d *AllocDir) Move(other Interface, tasks []*structs.Task) error {
 		fileInfo, err := os.Stat(otherTaskLocal)
 		if fileInfo != nil && err == nil {
 			// TaskDirs haven't been built yet, so create it
-			newTaskDir := filepath.Join(d.AllocDir, task.Name)
+			newTaskDir := filepath.Join(a.AllocDir, task.Name)
 			if err := os.MkdirAll(newTaskDir, fileMode777); err != nil {
 				return fmt.Errorf("error creating task %q dir: %w", task.Name, err)
 			}
@@ -318,31 +318,31 @@ func (d *AllocDir) Move(other Interface, tasks []*structs.Task) error {
 }

 // Destroy tears down previously build directory structure.
-func (d *AllocDir) Destroy() error {
+func (a *AllocDir) Destroy() error {
 	// Unmount all mounted shared alloc dirs.
 	mErr := new(multierror.Error)
-	if err := d.UnmountAll(); err != nil {
+	if err := a.UnmountAll(); err != nil {
 		mErr = multierror.Append(mErr, err)
 	}

-	if err := os.RemoveAll(d.AllocDir); err != nil {
-		mErr = multierror.Append(mErr, fmt.Errorf("failed to remove alloc dir %q: %w", d.AllocDir, err))
+	if err := os.RemoveAll(a.AllocDir); err != nil {
+		mErr = multierror.Append(mErr, fmt.Errorf("failed to remove alloc dir %q: %w", a.AllocDir, err))
 	}

 	// Unset built since the alloc dir has been destroyed.
-	d.mu.Lock()
-	d.built = false
-	d.mu.Unlock()
+	a.mu.Lock()
+	a.built = false
+	a.mu.Unlock()

 	return mErr.ErrorOrNil()
 }

 // UnmountAll linked/mounted directories in task dirs.
-func (d *AllocDir) UnmountAll() error {
-	d.mu.RLock()
-	defer d.mu.RUnlock()
+func (a *AllocDir) UnmountAll() error {
+	a.mu.RLock()
+	defer a.mu.RUnlock()

 	mErr := new(multierror.Error)
-	for _, dir := range d.TaskDirs {
+	for _, dir := range a.TaskDirs {
 		if err := dir.Unmount(); err != nil {
 			mErr = multierror.Append(mErr, err)
 		}
@@ -352,41 +352,41 @@ func (d *AllocDir) UnmountAll() error {
 }

 // Build the directory tree for an allocation.
-func (d *AllocDir) Build() error {
+func (a *AllocDir) Build() error {
 	// Make the alloc directory, owned by the nomad process.
-	if err := os.MkdirAll(d.AllocDir, fileMode755); err != nil {
-		return fmt.Errorf("Failed to make the alloc directory %v: %w", d.AllocDir, err)
+	if err := os.MkdirAll(a.AllocDir, fileMode755); err != nil {
+		return fmt.Errorf("Failed to make the alloc directory %v: %w", a.AllocDir, err)
 	}

 	// Make the shared directory and make it available to all user/groups.
-	if err := allocMkdirAll(d.SharedDir, fileMode755); err != nil {
+	if err := allocMkdirAll(a.SharedDir, fileMode755); err != nil {
 		return err
 	}

 	// Create shared subdirs
 	for _, dir := range SharedAllocDirs {
-		p := filepath.Join(d.SharedDir, dir)
+		p := filepath.Join(a.SharedDir, dir)
 		if err := allocMkdirAll(p, fileMode777); err != nil {
 			return err
 		}
 	}

 	// Mark as built
-	d.mu.Lock()
-	d.built = true
-	d.mu.Unlock()
+	a.mu.Lock()
+	a.built = true
+	a.mu.Unlock()
 	return nil
 }

 // List returns the list of files at a path relative to the alloc dir
-func (d *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) {
-	if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
+func (a *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) {
+	if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil {
 		return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err)
 	} else if escapes {
 		return nil, fmt.Errorf("Path escapes the alloc directory")
 	}

-	p := filepath.Join(d.AllocDir, path)
+	p := filepath.Join(a.AllocDir, path)
 	finfos, err := os.ReadDir(p)
 	if err != nil {
 		return []*cstructs.AllocFileInfo{}, err
@@ -409,14 +409,14 @@ func (d *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) {
 }

 // Stat returns information about the file at a path relative to the alloc dir
-func (d *AllocDir) Stat(path string) (*cstructs.AllocFileInfo, error) {
-	if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
+func (a *AllocDir) Stat(path string) (*cstructs.AllocFileInfo, error) {
+	if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil {
 		return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err)
 	} else if escapes {
 		return nil, fmt.Errorf("Path escapes the alloc directory")
 	}

-	p := filepath.Join(d.AllocDir, path)
+	p := filepath.Join(a.AllocDir, path)
 	info, err := os.Stat(p)
 	if err != nil {
 		return nil, err
@@ -459,28 +459,28 @@ func detectContentType(fileInfo os.FileInfo, path string) string {
 }

 // ReadAt returns a reader for a file at the path relative to the alloc dir
-func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) {
-	if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
+func (a *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) {
+	if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil {
 		return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err)
 	} else if escapes {
 		return nil, fmt.Errorf("Path escapes the alloc directory")
 	}

-	p := filepath.Join(d.AllocDir, path)
+	p := filepath.Join(a.AllocDir, path)

 	// Check if it is trying to read into a secret directory
-	d.mu.RLock()
-	for _, dir := range d.TaskDirs {
+	a.mu.RLock()
+	for _, dir := range a.TaskDirs {
 		if caseInsensitiveHasPrefix(p, dir.SecretsDir) {
-			d.mu.RUnlock()
+			a.mu.RUnlock()
 			return nil, fmt.Errorf("Reading secret file prohibited: %s", path)
 		}
 		if caseInsensitiveHasPrefix(p, dir.PrivateDir) {
-			d.mu.RUnlock()
+			a.mu.RUnlock()
 			return nil, fmt.Errorf("Reading private file prohibited: %s", path)
 		}
 	}
-	d.mu.RUnlock()
+	a.mu.RUnlock()

 	f, err := os.Open(p)
 	if err != nil {
@@ -499,15 +499,15 @@ func caseInsensitiveHasPrefix(s, prefix string) bool {
 // BlockUntilExists blocks until the passed file relative the allocation
 // directory exists. The block can be cancelled with the passed context.
-func (d *AllocDir) BlockUntilExists(ctx context.Context, path string) (chan error, error) {
-	if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
+func (a *AllocDir) BlockUntilExists(ctx context.Context, path string) (chan error, error) {
+	if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil {
 		return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err)
 	} else if escapes {
 		return nil, fmt.Errorf("Path escapes the alloc directory")
 	}

 	// Get the path relative to the alloc directory
-	p := filepath.Join(d.AllocDir, path)
+	p := filepath.Join(a.AllocDir, path)
 	watcher := getFileWatcher(p)
 	returnCh := make(chan error, 1)
 	t := &tomb.Tomb{}
@@ -525,8 +525,8 @@ func (d *AllocDir) BlockUntilExists(ctx context.Context, path string) (chan erro
 // ChangeEvents watches for changes to the passed path relative to the
 // allocation directory. The offset should be the last read offset. The context is
 // used to clean up the watch.
-func (d *AllocDir) ChangeEvents(ctx context.Context, path string, curOffset int64) (*watch.FileChanges, error) {
-	if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
+func (a *AllocDir) ChangeEvents(ctx context.Context, path string, curOffset int64) (*watch.FileChanges, error) {
+	if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil {
 		return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err)
 	} else if escapes {
 		return nil, fmt.Errorf("Path escapes the alloc directory")
@@ -539,7 +539,7 @@ func (d *AllocDir) ChangeEvents(ctx context.Context, path string, curOffset int6
 	}()

 	// Get the path relative to the alloc directory
-	p := filepath.Join(d.AllocDir, path)
+	p := filepath.Join(a.AllocDir, path)
 	watcher := getFileWatcher(p)
 	return watcher.ChangeEvents(t, curOffset)
 }

View File

@@ -98,28 +98,28 @@ type TaskDir struct {
// create paths on disk. // create paths on disk.
// //
// Call AllocDir.NewTaskDir to create new TaskDirs // Call AllocDir.NewTaskDir to create new TaskDirs
func (d *AllocDir) newTaskDir(taskName string, secretsInMB int) *TaskDir { func (a *AllocDir) newTaskDir(taskName string, secretsInMB int) *TaskDir {
taskDir := filepath.Join(d.AllocDir, taskName) taskDir := filepath.Join(a.AllocDir, taskName)
taskUnique := filepath.Base(d.AllocDir) + "-" + taskName taskUnique := filepath.Base(a.AllocDir) + "-" + taskName
if secretsInMB == 0 { if secretsInMB == 0 {
secretsInMB = defaultSecretDirTmpfsSize secretsInMB = defaultSecretDirTmpfsSize
} }
return &TaskDir{ return &TaskDir{
AllocDir: d.AllocDir, AllocDir: a.AllocDir,
Dir: taskDir, Dir: taskDir,
SharedAllocDir: filepath.Join(d.AllocDir, SharedAllocName), SharedAllocDir: filepath.Join(a.AllocDir, SharedAllocName),
LogDir: filepath.Join(d.AllocDir, SharedAllocName, LogDirName), LogDir: filepath.Join(a.AllocDir, SharedAllocName, LogDirName),
SharedTaskDir: filepath.Join(taskDir, SharedAllocName), SharedTaskDir: filepath.Join(taskDir, SharedAllocName),
LocalDir: filepath.Join(taskDir, TaskLocal), LocalDir: filepath.Join(taskDir, TaskLocal),
SecretsDir: filepath.Join(taskDir, TaskSecrets), SecretsDir: filepath.Join(taskDir, TaskSecrets),
PrivateDir: filepath.Join(taskDir, TaskPrivate), PrivateDir: filepath.Join(taskDir, TaskPrivate),
MountsAllocDir: filepath.Join(d.clientAllocMountsDir, taskUnique, "alloc"), MountsAllocDir: filepath.Join(a.clientAllocMountsDir, taskUnique, "alloc"),
MountsTaskDir: filepath.Join(d.clientAllocMountsDir, taskUnique), MountsTaskDir: filepath.Join(a.clientAllocMountsDir, taskUnique),
MountsSecretsDir: filepath.Join(d.clientAllocMountsDir, taskUnique, "secrets"), MountsSecretsDir: filepath.Join(a.clientAllocMountsDir, taskUnique, "secrets"),
skip: set.From[string]([]string{d.clientAllocDir, d.clientAllocMountsDir}), skip: set.From[string]([]string{a.clientAllocDir, a.clientAllocMountsDir}),
logger: d.logger.Named("task_dir").With("task_name", taskName), logger: a.logger.Named("task_dir").With("task_name", taskName),
secretsInMB: secretsInMB, secretsInMB: secretsInMB,
} }
} }

View File

@@ -88,7 +88,7 @@ func (e *editor) Write(filename, content string) error {
return os.WriteFile(path, []byte(content), 0644) return os.WriteFile(path, []byte(content), 0644)
} }
// A Factory creates a Lifecycle which is an abstraction over the setup and // Factory creates a Lifecycle which is an abstraction over the setup and
// teardown routines used for creating and destroying cgroups used for // teardown routines used for creating and destroying cgroups used for
// constraining Nomad tasks. // constraining Nomad tasks.
func Factory(allocID, task string, cores bool) Lifecycle { func Factory(allocID, task string, cores bool) Lifecycle {
@@ -106,7 +106,7 @@ func Factory(allocID, task string, cores bool) Lifecycle {
} }
} }
// A Lifecycle manages the lifecycle of the cgroup(s) of a task from the // Lifecycle manages the lifecycle of the cgroup(s) of a task from the
// perspective of the Nomad client. That is, it creates and deletes the cgroups // perspective of the Nomad client. That is, it creates and deletes the cgroups
// for a task, as well as provides last effort kill semantics for ensuring a // for a task, as well as provides last effort kill semantics for ensuring a
// process cannot stay alive beyond the intent of the client. // process cannot stay alive beyond the intent of the client.

View File

@@ -27,7 +27,8 @@ import (
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
// Returns an object representing the current OS thread's network namespace // GetCurrentNS returns an object representing the current OS thread's network
// namespace
func GetCurrentNS() (NetNS, error) { func GetCurrentNS() (NetNS, error) {
// Lock the thread in case other goroutine executes in it and changes its // Lock the thread in case other goroutine executes in it and changes its
// network namespace after getCurrentThreadNetNSPath(), otherwise it might // network namespace after getCurrentThreadNetNSPath(), otherwise it might
@@ -140,7 +141,7 @@ func IsNSorErr(nspath string) error {
} }
} }
// Returns an object representing the namespace referred to by @path // GetNS returns an object representing the namespace referred to by @path
func GetNS(nspath string) (NetNS, error) { func GetNS(nspath string) (NetNS, error) {
err := IsNSorErr(nspath) err := IsNSorErr(nspath)
if err != nil { if err != nil {

View File

@@ -87,18 +87,18 @@ type Topology struct {
OverrideWitholdCompute hw.MHz OverrideWitholdCompute hw.MHz
} }
func (t *Topology) SetNodes(nodes *idset.Set[hw.NodeID]) { func (st *Topology) SetNodes(nodes *idset.Set[hw.NodeID]) {
t.nodeIDs = nodes st.nodeIDs = nodes
if !nodes.Empty() { if !nodes.Empty() {
t.Nodes = nodes.Slice() st.Nodes = nodes.Slice()
} else { } else {
t.Nodes = []uint8{} st.Nodes = []uint8{}
} }
} }
func (t *Topology) SetNodesFrom(nodes []uint8) { func (st *Topology) SetNodesFrom(nodes []uint8) {
t.nodeIDs = idset.From[hw.NodeID](nodes) st.nodeIDs = idset.From[hw.NodeID](nodes)
t.Nodes = nodes st.Nodes = nodes
} }
// A Core represents one logical (vCPU) core on a processor. Basically the slice // A Core represents one logical (vCPU) core on a processor. Basically the slice

View File

@@ -2473,8 +2473,8 @@ func (s *ServerConfig) Merge(b *ServerConfig) *ServerConfig {
 }

 // Merge is used to merge two client configs together
-func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
-	result := *a
+func (c *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
+	result := *c

 	if b.Enabled {
 		result.Enabled = true
@@ -2615,10 +2615,10 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
 		result.ServerJoin = result.ServerJoin.Merge(b.ServerJoin)
 	}

-	if len(a.HostVolumes) == 0 && len(b.HostVolumes) != 0 {
+	if len(c.HostVolumes) == 0 && len(b.HostVolumes) != 0 {
 		result.HostVolumes = structs.CopySliceClientHostVolumeConfig(b.HostVolumes)
 	} else if len(b.HostVolumes) != 0 {
-		result.HostVolumes = structs.HostVolumeSliceMerge(a.HostVolumes, b.HostVolumes)
+		result.HostVolumes = structs.HostVolumeSliceMerge(c.HostVolumes, b.HostVolumes)
 	}

 	if b.CNIPath != "" {
@@ -2640,7 +2640,7 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
 		result.BridgeNetworkHairpinMode = true
 	}

-	result.HostNetworks = a.HostNetworks
+	result.HostNetworks = c.HostNetworks

 	if len(b.HostNetworks) != 0 {
 		result.HostNetworks = append(result.HostNetworks, b.HostNetworks...)
@@ -2660,9 +2660,9 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
 		result.CgroupParent = b.CgroupParent
 	}

-	result.Artifact = a.Artifact.Merge(b.Artifact)
-	result.Drain = a.Drain.Merge(b.Drain)
-	result.Users = a.Users.Merge(b.Users)
+	result.Artifact = c.Artifact.Merge(b.Artifact)
+	result.Drain = c.Drain.Merge(b.Drain)
+	result.Users = c.Users.Merge(b.Users)

 	if b.NodeMaxAllocs != 0 {
 		result.NodeMaxAllocs = b.NodeMaxAllocs
@@ -2789,8 +2789,8 @@ func (t *Telemetry) Merge(b *Telemetry) *Telemetry {
 }

 // Merge is used to merge two port configurations.
-func (a *Ports) Merge(b *Ports) *Ports {
-	result := *a
+func (p *Ports) Merge(b *Ports) *Ports {
+	result := *p

 	if b.HTTP != 0 {
 		result.HTTP = b.HTTP

View File

@@ -272,7 +272,7 @@ func (c *MockAgent) CheckRegs() []*api.AgentCheckRegistration {
 	return regs
 }

-// CheckRegister implements AgentAPI
+// CheckRegisterOpts implements AgentAPI
 func (c *MockAgent) CheckRegisterOpts(check *api.AgentCheckRegistration, _ *api.QueryOptions) error {
 	c.mu.Lock()
 	defer c.mu.Unlock()

View File

@@ -168,7 +168,7 @@ type ACLsAPI interface {
 // sidecar - Consul's view (agent, not catalog) of the service definition of the sidecar
 //           associated with existing that may or may not exist.
 //           May be nil.
-func (s *ServiceClient) agentServiceUpdateRequired(reason syncReason, wanted *api.AgentServiceRegistration, existing *api.AgentService, sidecar *api.AgentService) bool {
+func (c *ServiceClient) agentServiceUpdateRequired(reason syncReason, wanted *api.AgentServiceRegistration, existing *api.AgentService, sidecar *api.AgentService) bool {
 	switch reason {
 	case syncPeriodic:
 		// In a periodic sync with Consul, we need to respect the value of
@@ -188,7 +188,7 @@ func (s *ServiceClient) agentServiceUpdateRequired(reason syncReason, wanted *ap
 		maybeTweakTaggedAddresses(wanted, existing)

 		// Okay now it is safe to compare.
-		return s.different(wanted, existing, sidecar)
+		return c.different(wanted, existing, sidecar)

 	default:
 		// A non-periodic sync with Consul indicates an operation has been set
@@ -200,7 +200,7 @@ func (s *ServiceClient) agentServiceUpdateRequired(reason syncReason, wanted *ap
 		maybeTweakTaggedAddresses(wanted, existing)

 		// Okay now it is safe to compare.
-		return s.different(wanted, existing, sidecar)
+		return c.different(wanted, existing, sidecar)
 	}
 }
@@ -245,9 +245,9 @@ func maybeTweakTaggedAddresses(wanted *api.AgentServiceRegistration, existing *a
 // different compares the wanted state of the service registration with the actual
 // (cached) state of the service registration reported by Consul. If any of the
 // critical fields are not deeply equal, they considered different.
-func (s *ServiceClient) different(wanted *api.AgentServiceRegistration, existing *api.AgentService, sidecar *api.AgentService) bool {
+func (c *ServiceClient) different(wanted *api.AgentServiceRegistration, existing *api.AgentService, sidecar *api.AgentService) bool {
 	trace := func(field string, left, right any) {
-		s.logger.Trace("registrations different", "id", wanted.ID,
+		c.logger.Trace("registrations different", "id", wanted.ID,
 			"field", field, "wanted", fmt.Sprintf("%#v", left), "existing", fmt.Sprintf("%#v", right),
 		)
 	}

View File

@@ -14,7 +14,7 @@ import (
 	"github.com/shirou/gopsutil/v3/process"
 )

-// List the process tree starting at the given executorPID
+// ListByPid the process tree starting at the given executorPID
 func ListByPid(executorPID int) set.Collection[ProcessID] {
 	result := set.New[ProcessID](10)

View File

@@ -579,7 +579,7 @@ func Verbose(on bool) Option {
 	}
 }

-// Set an HCL variable.
+// Var sets a HCL variable.
 func Var(key, value string) Option {
 	return func(sub *Submission) {
 		sub.vars[key] = value

View File

@@ -65,10 +65,10 @@ func (ns *Namespace) String() string {
 	return ns.Name
 }

-func (n *Names) setClient() {
+func (g *Names) setClient() {
 	nomadClient, nomadErr := nomadapi.NewClient(nomadapi.DefaultConfig())
-	must.NoError(n.t, nomadErr, must.Sprint("failed to create nomad api client"))
-	n.nomadClient = nomadClient
+	must.NoError(g.t, nomadErr, must.Sprint("failed to create nomad api client"))
+	g.nomadClient = nomadClient
 }

 func configure(t *testing.T, opts ...Option) Cleanup {
@@ -117,7 +117,7 @@ func Create(t *testing.T, name string, opts ...Option) Cleanup {
 	return configure(t, append(opts, opt)...)
 }

-// Create namespaces of the given names.
+// CreateN namespaces of the given names.
 func CreateN(t *testing.T, names []string, opts ...Option) Cleanup {
 	creations := helper.ConvertSlice(names, func(name string) Option {
 		namespace := &Namespace{Name: name}

View File

@@ -35,8 +35,8 @@ func (g *Group) Wait() {
 	g.wg.Wait()
 }

-// Wait for all goroutines to exit, or for the context to finish.
-// Must be called after all calls to Go complete.
+// WaitWithContext waits for all goroutines to exit, or for the context to
+// finish. Must be called after all calls to Go complete.
 func (g *Group) WaitWithContext(ctx context.Context) {
 	doneCh := make(chan struct{})
 	go func() {

View File

@@ -67,6 +67,6 @@ func (h jobConsulHook) validateCluster(name string) error {
 // Mutate ensures that the job's Consul cluster has been configured to be the
 // default Consul cluster if unset
-func (j jobConsulHook) Mutate(job *structs.Job) (*structs.Job, []error, error) {
-	return j.mutateImpl(job, structs.ConsulDefaultCluster), nil, nil
+func (h jobConsulHook) Mutate(job *structs.Job) (*structs.Job, []error, error) {
+	return h.mutateImpl(job, structs.ConsulDefaultCluster), nil, nil
 }

View File

@@ -35,7 +35,7 @@ func (h jobVaultHook) validateClustersForNamespace(_ *structs.Job, blocks map[st
 	return nil
 }

-func (j jobVaultHook) Mutate(job *structs.Job) (*structs.Job, []error, error) {
+func (h jobVaultHook) Mutate(job *structs.Job) (*structs.Job, []error, error) {
 	for _, tg := range job.TaskGroups {
 		for _, task := range tg.Tasks {
 			if task.Vault == nil || task.Vault.Cluster != "" {

View File

@@ -98,7 +98,7 @@ func (t *TTLTimer) EmitMetrics(period time.Duration, shutdownCh chan struct{}) {
 	}
 }

-// timerNum returns the number of registered timers.
+// TimerNum returns the number of registered timers.
 func (t *TTLTimer) TimerNum() int {
 	t.lock.RLock()
 	defer t.lock.RUnlock()

View File

@@ -104,7 +104,7 @@ func (op *Operator) RaftGetConfiguration(args *structs.GenericRequest, reply *st
 	return nil
 }

-// COMPAT(1.12.0): RaftRemovePeerByAddress was used to support Raft Protocol v2,
+// RaftRemovePeerByAddress COMPAT(1.12.0) was used to support Raft Protocol v2,
 // which was removed in Nomad 1.4.0 but the API was not removed. Remove this RPC
 // entirely in Nomad 1.12.0.
 func (op *Operator) RaftRemovePeerByAddress(_ *structs.RaftPeerByAddressRequest, _ *struct{}) error {

View File

@@ -43,8 +43,8 @@ func (s *SentinelImport) Copy() *SentinelImport {
 }

 // Merge is used to merge two Sentinel configs together. The settings from the input always take precedence.
-func (a *SentinelConfig) Merge(b *SentinelConfig) *SentinelConfig {
-	result := *a
+func (s *SentinelConfig) Merge(b *SentinelConfig) *SentinelConfig {
+	result := *s
 	if len(b.Imports) > 0 {
 		result.Imports = append(result.Imports, b.Imports...)
 	}

View File

@@ -48,20 +48,20 @@ type ContentSecurityPolicy struct {
 }

 // Copy returns a copy of this Vault UI config.
-func (old *ContentSecurityPolicy) Copy() *ContentSecurityPolicy {
-	if old == nil {
+func (csp *ContentSecurityPolicy) Copy() *ContentSecurityPolicy {
+	if csp == nil {
 		return nil
 	}

 	nc := new(ContentSecurityPolicy)
-	*nc = *old
-	nc.ConnectSrc = slices.Clone(old.ConnectSrc)
-	nc.DefaultSrc = slices.Clone(old.DefaultSrc)
-	nc.FormAction = slices.Clone(old.FormAction)
-	nc.FrameAncestors = slices.Clone(old.FrameAncestors)
-	nc.ImgSrc = slices.Clone(old.ImgSrc)
-	nc.ScriptSrc = slices.Clone(old.ScriptSrc)
-	nc.StyleSrc = slices.Clone(old.StyleSrc)
+	*nc = *csp
+	nc.ConnectSrc = slices.Clone(csp.ConnectSrc)
+	nc.DefaultSrc = slices.Clone(csp.DefaultSrc)
+	nc.FormAction = slices.Clone(csp.FormAction)
+	nc.FrameAncestors = slices.Clone(csp.FrameAncestors)
+	nc.ImgSrc = slices.Clone(csp.ImgSrc)
+	nc.ScriptSrc = slices.Clone(csp.ScriptSrc)
+	nc.StyleSrc = slices.Clone(csp.StyleSrc)
 	return nc
 }

View File

@@ -24,7 +24,8 @@ type DeviceAccounterInstance struct {
 	Instances map[string]int
 }

-// Locality returns the NodeDeviceLocality of the instance of the specific deviceID.
+// GetLocality returns the NodeDeviceLocality of the instance of the specific
+// deviceID.
 //
 // If no instance matching the deviceID is found, nil is returned.
 func (dai *DeviceAccounterInstance) GetLocality(instanceID string) *NodeDeviceLocality {
@@ -163,9 +164,9 @@ func (d *DeviceAccounter) AddReserved(res *AllocatedDeviceResource) (collision b
 }

 // FreeCount returns the number of free device instances
-func (i *DeviceAccounterInstance) FreeCount() int {
+func (dai *DeviceAccounterInstance) FreeCount() int {
 	count := 0
-	for _, c := range i.Instances {
+	for _, c := range dai.Instances {
 		if c == 0 {
 			count++
 		}

View File

@@ -2827,23 +2827,23 @@ func portDiffs(old, new []Port, dynamic bool, contextual bool) []*ObjectDiff {
 }

-func (r *NUMA) Diff(other *NUMA, contextual bool) *ObjectDiff {
-	if r.Equal(other) {
+func (n *NUMA) Diff(other *NUMA, contextual bool) *ObjectDiff {
+	if n.Equal(other) {
 		return nil
 	}

 	diff := &ObjectDiff{Type: DiffTypeNone, Name: "NUMA"}
 	var oldPrimitiveFlat, newPrimitiveFlat map[string]string

-	if r == nil {
+	if n == nil {
 		diff.Type = DiffTypeAdded
 		newPrimitiveFlat = flatmap.Flatten(other, nil, true)
 	} else if other == nil {
 		diff.Type = DiffTypeDeleted
-		oldPrimitiveFlat = flatmap.Flatten(r, nil, true)
+		oldPrimitiveFlat = flatmap.Flatten(n, nil, true)
 	} else {
 		diff.Type = DiffTypeEdited
-		oldPrimitiveFlat = flatmap.Flatten(r, nil, true)
+		oldPrimitiveFlat = flatmap.Flatten(n, nil, true)
 		newPrimitiveFlat = flatmap.Flatten(other, nil, true)
 	}

 	diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual)

View File

@@ -88,7 +88,7 @@ func (k *UnwrappedRootKey) Copy() *UnwrappedRootKey {
 	}
 }

-// MakeInactive returns a copy of the RootKey with the meta state set to active
+// MakeActive returns a copy of the RootKey with the meta state set to active
 func (k *UnwrappedRootKey) MakeActive() *UnwrappedRootKey {
 	meta := k.Meta.Copy()
 	meta.State = RootKeyStateActive

View File

@@ -118,7 +118,7 @@ func (r LegacyNodeCpuResources) empty() bool {
 	return r.CpuShares == 0 || r.TotalCpuCores == 0
 }

-// NomadProcessorResources captures the CPU hardware resources of the Nomad node.
+// NodeProcessorResources captures the CPU hardware resources of the Nomad node.
 //
 // In Nomad enterprise this structure is used to map tasks to NUMA nodes.
 type NodeProcessorResources struct {
@@ -126,8 +126,9 @@ type NodeProcessorResources struct {
 	Topology *numalib.Topology // do not modify
 }

-// partial struct serialization / copy / merge sadness means this struct can
-// exist with no data, which is a condition we must detect during the upgrade path
+// Empty handles partial struct serialization / copy / merge sadness that means
+// this struct can exist with no data, which is a condition we must detect
+// during the upgrade path.
 func (r NodeProcessorResources) Empty() bool {
 	return r.Topology == nil || len(r.Topology.Cores) == 0
 }

View File

@@ -391,8 +391,8 @@ func (vd VariableDecrypted) Validate() error {
 	return nil
 }

-// A new variable can be crated just to support a lock, it doesn't require to hold
-// any items and it will validate the lock.
+// ValidateForLock ensures a new variable can be created just to support a lock,
+// it doesn't require to hold any items and it will validate the lock.
 func (vd VariableDecrypted) ValidateForLock() error {
 	var mErr multierror.Error
 	if vd.Namespace == AllNamespacesSentinel {

View File

@@ -182,7 +182,7 @@ func (w *Worker) Resume() {
 	}
 }

-// Resume transitions a worker to the stopping state. Check
+// Stop transitions a worker to the stopping state. Check
 // to see if the worker stopped by calling IsStopped()
 func (w *Worker) Stop() {
 	w.setStatus(WorkerStopping)
@@ -250,7 +250,7 @@ func (w *Worker) setWorkerStatusLocked(newStatus WorkerStatus) {
 	w.status = newStatus
 }

-// GetStatus returns the status of the Worker's Workload.
+// GetWorkloadStatus returns the status of the Worker's Workload.
 func (w *Worker) GetWorkloadStatus() SchedulerWorkerStatus {
 	w.statusLock.RLock()
 	defer w.statusLock.RUnlock()
@@ -578,7 +578,7 @@ type ErrMinIndexDeadlineExceeded struct {
 	timeout time.Duration
 }

-// Unwrapping an ErrMinIndexDeadlineExceeded always return
+// Unwrap an ErrMinIndexDeadlineExceeded that always returns
 // context.DeadlineExceeded
 func (ErrMinIndexDeadlineExceeded) Unwrap() error {
 	return context.DeadlineExceeded

View File

@@ -84,16 +84,16 @@ type ClientDriverConfig struct {
 	Topology *numalib.Topology
 }

-func (c *AgentConfig) toProto() *proto.NomadConfig {
-	if c == nil {
+func (ac *AgentConfig) toProto() *proto.NomadConfig {
+	if ac == nil {
 		return nil
 	}
 	cfg := &proto.NomadConfig{}
-	if c.Driver != nil {
+	if ac.Driver != nil {
 		cfg.Driver = &proto.NomadDriverConfig{
-			ClientMaxPort: uint32(c.Driver.ClientMaxPort),
-			ClientMinPort: uint32(c.Driver.ClientMinPort),
-			Topology:      nomadTopologyToProto(c.Driver.Topology),
+			ClientMaxPort: uint32(ac.Driver.ClientMaxPort),
+			ClientMinPort: uint32(ac.Driver.ClientMinPort),
+			Topology:      nomadTopologyToProto(ac.Driver.Topology),
 		}
 	}
 	return cfg

View File

@@ -33,9 +33,9 @@ func newDeviceAllocator(ctx Context, n *structs.Node) *deviceAllocator {
 	}
 }

-func (da *deviceAllocator) Copy() *deviceAllocator {
-	accounter := da.DeviceAccounter.Copy()
-	allocator := &deviceAllocator{accounter, da.ctx}
+func (d *deviceAllocator) Copy() *deviceAllocator {
+	accounter := d.DeviceAccounter.Copy()
+	allocator := &deviceAllocator{accounter, d.ctx}
 	return allocator
 }

View File

@@ -267,9 +267,9 @@ func (set allocSet) filterByTainted(state ClusterState) (untainted, migrate, los
 // filterOutByClientStatus returns a new allocSet containing allocs that don't
 // have the specified client status
-func (a allocSet) filterOutByClientStatus(clientStatuses ...string) allocSet {
+func (set allocSet) filterOutByClientStatus(clientStatuses ...string) allocSet {
 	allocs := make(allocSet)
-	for _, alloc := range a {
+	for _, alloc := range set {
 		if !slices.Contains(clientStatuses, alloc.ClientStatus) {
 			allocs[alloc.ID] = alloc
 		}
@@ -280,9 +280,9 @@ func (a allocSet) filterOutByClientStatus(clientStatuses ...string) allocSet {
 // filterByClientStatus returns a new allocSet containing allocs that have the
 // specified client status
-func (a allocSet) filterByClientStatus(clientStatus string) allocSet {
+func (set allocSet) filterByClientStatus(clientStatus string) allocSet {
 	allocs := make(allocSet)
-	for _, alloc := range a {
+	for _, alloc := range set {
 		if alloc.ClientStatus == clientStatus {
 			allocs[alloc.ID] = alloc
 		}
@@ -450,9 +450,9 @@ func updateByReschedulable(alloc *structs.Allocation, now time.Time, evalID stri
 // delayByStopAfter returns a delay for any lost allocation that's got a
 // disconnect.stop_on_client_after configured
-func (a allocSet) delayByStopAfter() (later []*delayedRescheduleInfo) {
+func (set allocSet) delayByStopAfter() (later []*delayedRescheduleInfo) {
 	now := time.Now().UTC()
-	for _, a := range a {
+	for _, a := range set {
 		if !a.ShouldClientStop() {
 			continue
 		}
@@ -472,10 +472,10 @@ func (a allocSet) delayByStopAfter() (later []*delayedRescheduleInfo) {
 // delayByLostAfter returns a delay for any unknown allocation
 // that has disconnect.lost_after configured
-func (a allocSet) delayByLostAfter(now time.Time) ([]*delayedRescheduleInfo, error) {
+func (set allocSet) delayByLostAfter(now time.Time) ([]*delayedRescheduleInfo, error) {
 	var later []*delayedRescheduleInfo

-	for _, alloc := range a {
+	for _, alloc := range set {
 		timeout := alloc.DisconnectTimeout(now)
 		if !timeout.After(now) {
 			return nil, errors.New("unable to computing disconnecting timeouts")

View File

@@ -316,12 +316,14 @@ func (h *Harness) SetNoSubmit() {
 	h.noSubmit = true
 }

-// helper method to create allocations with given jobs and resources
+// CreateAlloc is helper method to create allocations with given jobs and
+// resources
 func CreateAlloc(id string, job *structs.Job, resource *structs.Resources) *structs.Allocation {
 	return CreateAllocInner(id, job, resource, nil, nil)
 }

-// helper method to create allocation with network at the task group level
+// CreateAllocWithTaskgroupNetwork is is helper method to create allocation with
+// network at the task group level
 func CreateAllocWithTaskgroupNetwork(id string, job *structs.Job, resource *structs.Resources, tgNet *structs.NetworkResource) *structs.Allocation {
 	return CreateAllocInner(id, job, resource, nil, tgNet)
 }

View File

@@ -13,7 +13,8 @@ import (
 	"github.com/shoenig/test/must"
 )

-// Assert CA file exists and is a valid CA Returns the CA
+// IsValidCertificate asserts the CA file exists and is a valid CA Returns the
+// CA
 func IsValidCertificate(t *testing.T, caPath string) *x509.Certificate {
 	t.Helper()
@@ -28,7 +29,8 @@ func IsValidCertificate(t *testing.T, caPath string) *x509.Certificate {
 	return ca
 }

-// Assert key file exists and is a valid signer returns a bool
+// IsValidSigner asserts the key file exists and is a valid signer returns a
+// bool
 func IsValidSigner(t *testing.T, keyPath string) bool {
 	t.Helper()

View File

@@ -335,7 +335,7 @@ func WaitForRunning(t testing.TB, rpc rpcFn, job *structs.Job) []*structs.AllocL
 	return WaitForRunningWithToken(t, rpc, job, "")
 }

-// WaitforJobAllocStatus blocks until the ClientStatus of allocations for a job
+// WaitForJobAllocStatus blocks until the ClientStatus of allocations for a job
 // match the expected map of <ClientStatus>: <count>.
 func WaitForJobAllocStatus(t testing.TB, rpc rpcFn, job *structs.Job, allocStatus map[string]int) {
 	t.Helper()
@@ -387,7 +387,7 @@ func WaitForJobAllocStatusWithToken(t testing.TB, rpc rpcFn, job *structs.Job, a
 	return allocs
 }

-// WaitforJobEvalStatus blocks until the job's evals match the status described
+// WaitForJobEvalStatus blocks until the job's evals match the status described
 // in the map of <Eval.Status>: <count>.
 func WaitForJobEvalStatus(t testing.TB, rpc rpcFn, job *structs.Job, evalStatus map[string]int) []*structs.Evaluation {
 	return WaitForJobEvalStatusWithToken(t, rpc, job, evalStatus, "")

View File

@@ -71,38 +71,38 @@ func GetVersion() *VersionInfo {
 	}
 }

-func (c *VersionInfo) VersionNumber() string {
-	version := c.Version
+func (v *VersionInfo) VersionNumber() string {
+	version := v.Version

-	if c.VersionPrerelease != "" {
-		version = fmt.Sprintf("%s-%s", version, c.VersionPrerelease)
+	if v.VersionPrerelease != "" {
+		version = fmt.Sprintf("%s-%s", version, v.VersionPrerelease)
 	}

-	if c.VersionMetadata != "" {
-		version = fmt.Sprintf("%s+%s", version, c.VersionMetadata)
+	if v.VersionMetadata != "" {
+		version = fmt.Sprintf("%s+%s", version, v.VersionMetadata)
 	}

 	return version
 }

-func (c *VersionInfo) FullVersionNumber(rev bool) string {
+func (v *VersionInfo) FullVersionNumber(rev bool) string {
 	var versionString bytes.Buffer

-	fmt.Fprintf(&versionString, "Nomad v%s", c.Version)
-	if c.VersionPrerelease != "" {
-		fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease)
+	fmt.Fprintf(&versionString, "Nomad v%s", v.Version)
+	if v.VersionPrerelease != "" {
+		fmt.Fprintf(&versionString, "-%s", v.VersionPrerelease)
 	}

-	if c.VersionMetadata != "" {
-		fmt.Fprintf(&versionString, "+%s", c.VersionMetadata)
+	if v.VersionMetadata != "" {
+		fmt.Fprintf(&versionString, "+%s", v.VersionMetadata)
 	}

-	if !c.BuildDate.IsZero() {
-		fmt.Fprintf(&versionString, "\nBuildDate %s", c.BuildDate.Format(time.RFC3339))
+	if !v.BuildDate.IsZero() {
+		fmt.Fprintf(&versionString, "\nBuildDate %s", v.BuildDate.Format(time.RFC3339))
 	}

-	if rev && c.Revision != "" {
-		fmt.Fprintf(&versionString, "\nRevision %s", c.Revision)
+	if rev && v.Revision != "" {
+		fmt.Fprintf(&versionString, "\nRevision %s", v.Revision)
 	}

 	return versionString.String()