ci: Update golangci-lint to v2 and fix highlighted issues. (#26334)
.golangci.yml
@@ -1,8 +1,8 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

version: "2"
run:
# Timeout for analysis.
timeout: 10m

# Modules download mode (do not modify go.mod)
@@ -11,82 +11,71 @@ run:
# Exclude test files
tests: false

# Skip ui and generated files
issues:
exclude-files:
- ".*\\.generated\\.go$"
- ".*bindata_assetfs\\.go$"
skip-dirs:
- ui

# Output configuration options
output:
formats:
- format: colored-line-number
text:
path: stdout

# print lines of code with issue, default is true
print-issued-lines: true

# print linter name in the end of issue text, default is true
print-linter-name: true

# all available settings of specific linters
linters-settings:
errcheck:
# report about not checking of errors in type assetions: `a := b.(MyStruct)`;
# default is false: such cases aren't reported by default.
check-type-assertions: false

exclude-functions:
- io.*
- fmt.*

# path to a file containing a list of functions to exclude from checking
# see https://github.com/kisielk/errcheck#excluding-functions for details
# exclude: /path/to/file.txt
govet:
# report about shadowed variables
disable:
- shadow
gofmt:
# simplify code: gofmt with `-s` option, true by default
simplify: true
gocritic:
disabled-checks:
- commentFormatting
- deprecatedComment
staticcheck:
# I(jrasell) will work on enabling additional checks when possible.
checks: ["ST1020", "ST1016"]

issues:
exclude:
- ifElseChain
- singleCaseSwitch
- assignOp
- unlambda
print-linter-name: true
print-issued-lines: true

linters:
disable-all: true
default: none
enable:
- goimports
- gocritic
- misspell
- govet
- ineffassign
- unconvert
- gofmt
- gosimple
- staticcheck
- asasalint
- asciicheck
- bidichk
- bodyclose
- copyloopvar
- dogsled
- durationcheck
# - errchkjson (todo)
# - errorlint (todo)
- copyloopvar
- gocritic
- govet
- ineffassign
- misspell
- staticcheck
- unconvert
- usestdlibvars
fast: false
settings:
errcheck:
# report about not checking of errors in type assetions: `a := b.(MyStruct)`;
# default is false: such cases aren't reported by default.
check-type-assertions: false
exclude-functions:
- io.*
- fmt.*
gocritic:
disabled-checks:
- commentFormatting
- deprecatedComment
govet:
disable:
- shadow
staticcheck:
checks:
- ST1016
- ST1020
exclusions:
rules:
- path: (.+)\.go$
text: ifElseChain
- path: (.+)\.go$
text: singleCaseSwitch
- path: (.+)\.go$
text: assignOp
- path: (.+)\.go$
text: unlambda
paths:
- ".*\\.generated\\.go$"
- ".*bindata_assetfs\\.go$"

formatters:
enable:
- gofmt
- goimports
settings:
gofmt:
simplify: true
exclusions:
paths:
- ".*\\.generated\\.go$"
- ".*bindata_assetfs\\.go$"
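Most of the Go churn in the rest of this commit follows from the two staticcheck checks the config keeps enabled: ST1016 (all methods on a type should use the same receiver name) and ST1020 (the doc comment on an exported identifier should start with that identifier's name). A minimal sketch of what those checks expect, using a made-up type rather than anything from the Nomad codebase:

package example

// Counter is a hypothetical type used only to illustrate the lint rules.
type Counter struct{ n int }

// Add increases the count by delta (ST1020: the comment starts with "Add").
func (c *Counter) Add(delta int) { c.n += delta }

// Value returns the current count (ST1016: the receiver is consistently "c").
func (c *Counter) Value() int { return c.n }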
@@ -143,7 +143,7 @@ deps: ## Install build and development dependencies
.PHONY: lint-deps
lint-deps: ## Install linter dependencies
@echo "==> Updating linter dependencies..."
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.64.5
go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.3.0
go install github.com/client9/misspell/cmd/misspell@v0.3.4
go install github.com/hashicorp/go-hclog/hclogvet@feaf6d2ec20fd895e711195c99e3fde93a68afc5
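The install path gains a /v2 segment because Go modules at major version 2 or later embed the major version in the module path. A small sketch of the same convention, assuming gopsutil's process.Pids helper from the /v3 module that also appears later in this diff:

package main

import (
	"fmt"

	// The "/v3" path segment is the module's major version, just as
	// golangci-lint v2 now installs from a "/v2" module path.
	"github.com/shirou/gopsutil/v3/process"
)

func main() {
	pids, err := process.Pids() // assumed helper: lists visible process IDs
	if err != nil {
		fmt.Println("error listing pids:", err)
		return
	}
	fmt.Println("process count:", len(pids))
}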
@@ -156,7 +156,7 @@ func (a *Agent) Members() (*ServerMembers, error) {
return resp, nil
}

// Members is used to query all of the known server members
// MembersOpts is used to query all of the known server members
// with the ability to set QueryOptions
func (a *Agent) MembersOpts(opts *QueryOptions) (*ServerMembers, error) {
var resp *ServerMembers
api/csi.go
@@ -102,10 +102,11 @@ func (v *CSIVolumes) Create(vol *CSIVolume, w *WriteOptions) ([]*CSIVolume, *Wri
return resp.Volumes, meta, err
}

// DEPRECATED: will be removed in Nomad 1.4.0
// Delete deletes a CSI volume from an external storage provider. The ID
// passed as an argument here is for the storage provider's ID, so a volume
// that's already been deregistered can be deleted.
//
// Deprecated: will be removed in Nomad 1.4.0
func (v *CSIVolumes) Delete(externalVolID string, w *WriteOptions) error {
_, err := v.client.delete(fmt.Sprintf("/v1/volume/csi/%v/delete", url.PathEscape(externalVolID)), nil, nil, w)
return err
@@ -184,8 +185,9 @@ func (v *CSIVolumes) ListSnapshotsOpts(req *CSISnapshotListRequest) (*CSISnapsho
return resp, qm, nil
}

// DEPRECATED: will be removed in Nomad 1.4.0
// ListSnapshots lists external storage volume snapshots.
//
// Deprecated: will be removed in Nomad 1.4.0
func (v *CSIVolumes) ListSnapshots(pluginID string, secrets string, q *QueryOptions) (*CSISnapshotListResponse, *QueryMeta, error) {
var resp *CSISnapshotListResponse

@@ -269,26 +271,26 @@ func (o *CSIMountOptions) Merge(p *CSIMountOptions) {
// API or in Nomad's logs.
type CSISecrets map[string]string

func (q *QueryOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
func (o *QueryOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
pairs := []string{}
for k, v := range secrets {
pairs = append(pairs, fmt.Sprintf("%v=%v", k, v))
}
if q.Headers == nil {
q.Headers = map[string]string{}
if o.Headers == nil {
o.Headers = map[string]string{}
}
q.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
o.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
}

func (w *WriteOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
func (o *WriteOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
pairs := []string{}
for k, v := range secrets {
pairs = append(pairs, fmt.Sprintf("%v=%v", k, v))
}
if w.Headers == nil {
w.Headers = map[string]string{}
if o.Headers == nil {
o.Headers = map[string]string{}
}
w.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
o.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
}

// CSIVolume is used for serialization, see also nomad/structs/csi.go
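The reworded comments above move the deprecation notice into the form Go tooling understands: a doc-comment paragraph beginning with "Deprecated:", which staticcheck, godoc, and gopls all recognize. A minimal sketch with a made-up function, not taken from this change:

package example

// OldSum adds two integers.
//
// Deprecated: use NewSum instead; shown only to illustrate the convention.
func OldSum(a, b int) int { return a + b }

// NewSum adds two integers.
func NewSum(a, b int) int { return a + b }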
@@ -12,8 +12,8 @@ import (
"strings"
)

// Checks the current executable, then $GOPATH/bin, and finally the CWD, in that
// order. If it can't be found, an error is returned.
// NomadExecutable checks the current executable, then $GOPATH/bin, and finally
// the CWD, in that order. If it can't be found, an error is returned.
func NomadExecutable() (string, error) {
nomadExe := "nomad"
if runtime.GOOS == "windows" {
@@ -176,7 +176,7 @@ func (j *Jobs) List(q *QueryOptions) ([]*JobListStub, *QueryMeta, error) {
return j.ListOptions(nil, q)
}

// List is used to list all of the existing jobs.
// ListOptions is used to list all of the existing jobs.
func (j *Jobs) ListOptions(opts *JobListOptions, q *QueryOptions) ([]*JobListStub, *QueryMeta, error) {
var resp []*JobListStub
@@ -126,7 +126,7 @@ func (n *Nodes) UpdateDrain(nodeID string, spec *DrainSpec, markEligible bool, q
return resp, err
}

// UpdateDrainWithMeta is used to update the drain strategy for a given node. If
// UpdateDrainOpts is used to update the drain strategy for a given node. If
// markEligible is true and the drain is being removed, the node will be marked
// as having its scheduling being eligible
func (n *Nodes) UpdateDrainOpts(nodeID string, opts *DrainOptions, q *WriteOptions) (*NodeDrainUpdateResponse,
@@ -478,7 +478,7 @@ func (n *Nodes) GC(nodeID string, q *QueryOptions) error {
return err
}

// TODO Add tests
// GcAlloc - TODO Add tests
func (n *Nodes) GcAlloc(allocID string, q *QueryOptions) error {
path := fmt.Sprintf("/v1/client/allocation/%s/gc", allocID)
_, err := n.client.query(path, nil, q)
@@ -51,7 +51,7 @@ func (q *Quotas) ListUsage(qo *QueryOptions) ([]*QuotaUsage, *QueryMeta, error)
return resp, qm, nil
}

// PrefixList is used to do a PrefixList search over quota usages
// PrefixListUsage is used to do a PrefixList search over quota usages
func (q *Quotas) PrefixListUsage(prefix string, qo *QueryOptions) ([]*QuotaUsage, *QueryMeta, error) {
if qo == nil {
qo = &QueryOptions{Prefix: prefix}
@@ -181,8 +181,10 @@ type NetworkResource struct {
CNI *CNIConfig `hcl:"cni,block"`
}

// Megabits should not be used.
//
// COMPAT(0.13)
// XXX Deprecated. Please do not use. The method will be removed in Nomad
// Deprecated. Please do not use. The method will be removed in Nomad
// 0.13 and is only being kept to allow any references to be removed before
// then.
func (n *NetworkResource) Megabits() int {
api/tasks.go
@@ -310,14 +310,14 @@ func (r *ReschedulePolicy) Copy() *ReschedulePolicy {
return nrp
}

func (p *ReschedulePolicy) String() string {
if p == nil {
func (r *ReschedulePolicy) String() string {
if r == nil {
return ""
}
if *p.Unlimited {
return fmt.Sprintf("unlimited with %v delay, max_delay = %v", *p.DelayFunction, *p.MaxDelay)
if *r.Unlimited {
return fmt.Sprintf("unlimited with %v delay, max_delay = %v", *r.DelayFunction, *r.MaxDelay)
}
return fmt.Sprintf("%v in %v with %v delay, max_delay = %v", *p.Attempts, *p.Interval, *p.DelayFunction, *p.MaxDelay)
return fmt.Sprintf("%v in %v with %v delay, max_delay = %v", *r.Attempts, *r.Interval, *r.DelayFunction, *r.MaxDelay)
}

// Spread is used to serialize task group allocation spread preferences
@@ -664,7 +664,7 @@ func (g *TaskGroup) Constrain(c *Constraint) *TaskGroup {
return g
}

// AddMeta is used to add a meta k/v pair to a task group
// SetMeta is used to add a meta k/v pair to a task group
func (g *TaskGroup) SetMeta(key, val string) *TaskGroup {
if g.Meta == nil {
g.Meta = make(map[string]string)
@@ -697,7 +697,7 @@ func (g *TaskGroup) AddSpread(s *Spread) *TaskGroup {
return g
}

// AddSpread is used to add a new spread preference to a task group.
// ScalingPolicy is used to add a new scaling policy to a task group.
func (g *TaskGroup) ScalingPolicy(sp *ScalingPolicy) *TaskGroup {
g.Scaling = sp
return g
@@ -751,7 +751,7 @@ type TaskLifecycle struct {
Sidecar bool `mapstructure:"sidecar" hcl:"sidecar,optional"`
}

// Determine if lifecycle has user-input values
// Empty determines if lifecycle has user-input values
func (l *TaskLifecycle) Empty() bool {
return l == nil
}
@@ -1048,7 +1048,7 @@ func NewTask(name, driver string) *Task {
}
}

// Configure is used to configure a single k/v pair on
// SetConfig is used to configure a single k/v pair on
// the task.
func (t *Task) SetConfig(key string, val interface{}) *Task {
if t.Config == nil {
@@ -1073,7 +1073,7 @@ func (t *Task) Require(r *Resources) *Task {
return t
}

// Constraint adds a new constraints to a single task.
// Constrain adds a new constraints to a single task.
func (t *Task) Constrain(c *Constraint) *Task {
t.Constraints = append(t.Constraints, c)
return t
@@ -173,17 +173,17 @@ func NewAllocDir(logger hclog.Logger, clientAllocDir, clientMountsDir, allocID s
}

// NewTaskDir creates a new TaskDir and adds it to the AllocDirs TaskDirs map.
func (d *AllocDir) NewTaskDir(task *structs.Task) *TaskDir {
d.mu.Lock()
defer d.mu.Unlock()
func (a *AllocDir) NewTaskDir(task *structs.Task) *TaskDir {
a.mu.Lock()
defer a.mu.Unlock()

secretsSize := 0
if task.Resources != nil {
secretsSize = task.Resources.SecretsMB
}

td := d.newTaskDir(task.Name, secretsSize)
d.TaskDirs[task.Name] = td
td := a.newTaskDir(task.Name, secretsSize)
a.TaskDirs[task.Name] = td
return td
}

@@ -193,13 +193,13 @@ func (d *AllocDir) NewTaskDir(task *structs.Task) *TaskDir {
// Since a valid tar may have been written even when an error occurs, a special
// file "NOMAD-${ALLOC_ID}-ERROR.log" will be appended to the tar with the
// error message as the contents.
func (d *AllocDir) Snapshot(w io.Writer) error {
d.mu.RLock()
defer d.mu.RUnlock()
func (a *AllocDir) Snapshot(w io.Writer) error {
a.mu.RLock()
defer a.mu.RUnlock()

allocDataDir := filepath.Join(d.SharedDir, SharedDataDir)
allocDataDir := filepath.Join(a.SharedDir, SharedDataDir)
rootPaths := []string{allocDataDir}
for _, taskdir := range d.TaskDirs {
for _, taskdir := range a.TaskDirs {
rootPaths = append(rootPaths, taskdir.LocalDir)
}

@@ -213,7 +213,7 @@ func (d *AllocDir) Snapshot(w io.Writer) error {

// Include the path of the file name relative to the alloc dir
// so that we can put the files in the right directories
relPath, err := filepath.Rel(d.AllocDir, path)
relPath, err := filepath.Rel(a.AllocDir, path)
if err != nil {
return err
}
@@ -256,14 +256,14 @@ func (d *AllocDir) Snapshot(w io.Writer) error {
// directories in the archive
for _, path := range rootPaths {
if err := filepath.Walk(path, walkFn); err != nil {
allocID := filepath.Base(d.AllocDir)
allocID := filepath.Base(a.AllocDir)
if writeErr := writeError(tw, allocID, err); writeErr != nil {
// This could be bad; other side won't know
// snapshotting failed. It could also just mean
// the snapshotting side closed the connect
// prematurely and won't try to use the tar
// anyway.
d.logger.Warn("snapshotting failed and unable to write error marker", "error", writeErr)
a.logger.Warn("snapshotting failed and unable to write error marker", "error", writeErr)
}
return fmt.Errorf("failed to snapshot %s: %w", path, err)
}
@@ -273,20 +273,20 @@ func (d *AllocDir) Snapshot(w io.Writer) error {
}

// Move other alloc directory's shared path and local dir to this alloc dir.
func (d *AllocDir) Move(other Interface, tasks []*structs.Task) error {
d.mu.RLock()
if !d.built {
func (a *AllocDir) Move(other Interface, tasks []*structs.Task) error {
a.mu.RLock()
if !a.built {
// Enforce the invariant that Build is called before Move
d.mu.RUnlock()
return fmt.Errorf("unable to move to %q - alloc dir is not built", d.AllocDir)
a.mu.RUnlock()
return fmt.Errorf("unable to move to %q - alloc dir is not built", a.AllocDir)
}

// Moving is slow and only reads immutable fields, so unlock during heavy IO
d.mu.RUnlock()
a.mu.RUnlock()

// Move the data directory
otherDataDir := filepath.Join(other.ShareDirPath(), SharedDataDir)
dataDir := filepath.Join(d.SharedDir, SharedDataDir)
dataDir := filepath.Join(a.SharedDir, SharedDataDir)
if fileInfo, err := os.Stat(otherDataDir); fileInfo != nil && err == nil {
os.Remove(dataDir) // remove an empty data dir if it exists
if err := os.Rename(otherDataDir, dataDir); err != nil {
@@ -302,7 +302,7 @@ func (d *AllocDir) Move(other Interface, tasks []*structs.Task) error {
fileInfo, err := os.Stat(otherTaskLocal)
if fileInfo != nil && err == nil {
// TaskDirs haven't been built yet, so create it
newTaskDir := filepath.Join(d.AllocDir, task.Name)
newTaskDir := filepath.Join(a.AllocDir, task.Name)
if err := os.MkdirAll(newTaskDir, fileMode777); err != nil {
return fmt.Errorf("error creating task %q dir: %w", task.Name, err)
}
@@ -318,31 +318,31 @@ func (d *AllocDir) Move(other Interface, tasks []*structs.Task) error {
}

// Destroy tears down previously build directory structure.
func (d *AllocDir) Destroy() error {
func (a *AllocDir) Destroy() error {
// Unmount all mounted shared alloc dirs.
mErr := new(multierror.Error)
if err := d.UnmountAll(); err != nil {
if err := a.UnmountAll(); err != nil {
mErr = multierror.Append(mErr, err)
}

if err := os.RemoveAll(d.AllocDir); err != nil {
mErr = multierror.Append(mErr, fmt.Errorf("failed to remove alloc dir %q: %w", d.AllocDir, err))
if err := os.RemoveAll(a.AllocDir); err != nil {
mErr = multierror.Append(mErr, fmt.Errorf("failed to remove alloc dir %q: %w", a.AllocDir, err))
}

// Unset built since the alloc dir has been destroyed.
d.mu.Lock()
d.built = false
d.mu.Unlock()
a.mu.Lock()
a.built = false
a.mu.Unlock()
return mErr.ErrorOrNil()
}

// UnmountAll linked/mounted directories in task dirs.
func (d *AllocDir) UnmountAll() error {
d.mu.RLock()
defer d.mu.RUnlock()
func (a *AllocDir) UnmountAll() error {
a.mu.RLock()
defer a.mu.RUnlock()

mErr := new(multierror.Error)
for _, dir := range d.TaskDirs {
for _, dir := range a.TaskDirs {
if err := dir.Unmount(); err != nil {
mErr = multierror.Append(mErr, err)
}
@@ -352,41 +352,41 @@ func (d *AllocDir) UnmountAll() error {
}

// Build the directory tree for an allocation.
func (d *AllocDir) Build() error {
func (a *AllocDir) Build() error {
// Make the alloc directory, owned by the nomad process.
if err := os.MkdirAll(d.AllocDir, fileMode755); err != nil {
return fmt.Errorf("Failed to make the alloc directory %v: %w", d.AllocDir, err)
if err := os.MkdirAll(a.AllocDir, fileMode755); err != nil {
return fmt.Errorf("Failed to make the alloc directory %v: %w", a.AllocDir, err)
}

// Make the shared directory and make it available to all user/groups.
if err := allocMkdirAll(d.SharedDir, fileMode755); err != nil {
if err := allocMkdirAll(a.SharedDir, fileMode755); err != nil {
return err
}

// Create shared subdirs
for _, dir := range SharedAllocDirs {
p := filepath.Join(d.SharedDir, dir)
p := filepath.Join(a.SharedDir, dir)
if err := allocMkdirAll(p, fileMode777); err != nil {
return err
}
}

// Mark as built
d.mu.Lock()
d.built = true
d.mu.Unlock()
a.mu.Lock()
a.built = true
a.mu.Unlock()
return nil
}
// List returns the list of files at a path relative to the alloc dir
func (d *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) {
if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
func (a *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) {
if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil {
return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err)
} else if escapes {
return nil, fmt.Errorf("Path escapes the alloc directory")
}

p := filepath.Join(d.AllocDir, path)
p := filepath.Join(a.AllocDir, path)
finfos, err := os.ReadDir(p)
if err != nil {
return []*cstructs.AllocFileInfo{}, err
@@ -409,14 +409,14 @@ func (d *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) {
}

// Stat returns information about the file at a path relative to the alloc dir
func (d *AllocDir) Stat(path string) (*cstructs.AllocFileInfo, error) {
if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
func (a *AllocDir) Stat(path string) (*cstructs.AllocFileInfo, error) {
if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil {
return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err)
} else if escapes {
return nil, fmt.Errorf("Path escapes the alloc directory")
}

p := filepath.Join(d.AllocDir, path)
p := filepath.Join(a.AllocDir, path)
info, err := os.Stat(p)
if err != nil {
return nil, err
@@ -459,28 +459,28 @@ func detectContentType(fileInfo os.FileInfo, path string) string {
}

// ReadAt returns a reader for a file at the path relative to the alloc dir
func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) {
if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
func (a *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) {
if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil {
return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err)
} else if escapes {
return nil, fmt.Errorf("Path escapes the alloc directory")
}

p := filepath.Join(d.AllocDir, path)
p := filepath.Join(a.AllocDir, path)

// Check if it is trying to read into a secret directory
d.mu.RLock()
for _, dir := range d.TaskDirs {
a.mu.RLock()
for _, dir := range a.TaskDirs {
if caseInsensitiveHasPrefix(p, dir.SecretsDir) {
d.mu.RUnlock()
a.mu.RUnlock()
return nil, fmt.Errorf("Reading secret file prohibited: %s", path)
}
if caseInsensitiveHasPrefix(p, dir.PrivateDir) {
d.mu.RUnlock()
a.mu.RUnlock()
return nil, fmt.Errorf("Reading private file prohibited: %s", path)
}
}
d.mu.RUnlock()
a.mu.RUnlock()

f, err := os.Open(p)
if err != nil {
@@ -499,15 +499,15 @@ func caseInsensitiveHasPrefix(s, prefix string) bool {

// BlockUntilExists blocks until the passed file relative the allocation
// directory exists. The block can be cancelled with the passed context.
func (d *AllocDir) BlockUntilExists(ctx context.Context, path string) (chan error, error) {
if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
func (a *AllocDir) BlockUntilExists(ctx context.Context, path string) (chan error, error) {
if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil {
return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err)
} else if escapes {
return nil, fmt.Errorf("Path escapes the alloc directory")
}

// Get the path relative to the alloc directory
p := filepath.Join(d.AllocDir, path)
p := filepath.Join(a.AllocDir, path)
watcher := getFileWatcher(p)
returnCh := make(chan error, 1)
t := &tomb.Tomb{}
@@ -525,8 +525,8 @@ func (d *AllocDir) BlockUntilExists(ctx context.Context, path string) (chan erro
// ChangeEvents watches for changes to the passed path relative to the
// allocation directory. The offset should be the last read offset. The context is
// used to clean up the watch.
func (d *AllocDir) ChangeEvents(ctx context.Context, path string, curOffset int64) (*watch.FileChanges, error) {
if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil {
func (a *AllocDir) ChangeEvents(ctx context.Context, path string, curOffset int64) (*watch.FileChanges, error) {
if escapes, err := escapingfs.PathEscapesAllocDir(a.AllocDir, "", path); err != nil {
return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err)
} else if escapes {
return nil, fmt.Errorf("Path escapes the alloc directory")
@@ -539,7 +539,7 @@ func (d *AllocDir) ChangeEvents(ctx context.Context, path string, curOffset int6
}()

// Get the path relative to the alloc directory
p := filepath.Join(d.AllocDir, path)
p := filepath.Join(a.AllocDir, path)
watcher := getFileWatcher(p)
return watcher.ChangeEvents(t, curOffset)
}

@@ -98,28 +98,28 @@ type TaskDir struct {
// create paths on disk.
//
// Call AllocDir.NewTaskDir to create new TaskDirs
func (d *AllocDir) newTaskDir(taskName string, secretsInMB int) *TaskDir {
taskDir := filepath.Join(d.AllocDir, taskName)
taskUnique := filepath.Base(d.AllocDir) + "-" + taskName
func (a *AllocDir) newTaskDir(taskName string, secretsInMB int) *TaskDir {
taskDir := filepath.Join(a.AllocDir, taskName)
taskUnique := filepath.Base(a.AllocDir) + "-" + taskName

if secretsInMB == 0 {
secretsInMB = defaultSecretDirTmpfsSize
}

return &TaskDir{
AllocDir: d.AllocDir,
AllocDir: a.AllocDir,
Dir: taskDir,
SharedAllocDir: filepath.Join(d.AllocDir, SharedAllocName),
LogDir: filepath.Join(d.AllocDir, SharedAllocName, LogDirName),
SharedAllocDir: filepath.Join(a.AllocDir, SharedAllocName),
LogDir: filepath.Join(a.AllocDir, SharedAllocName, LogDirName),
SharedTaskDir: filepath.Join(taskDir, SharedAllocName),
LocalDir: filepath.Join(taskDir, TaskLocal),
SecretsDir: filepath.Join(taskDir, TaskSecrets),
PrivateDir: filepath.Join(taskDir, TaskPrivate),
MountsAllocDir: filepath.Join(d.clientAllocMountsDir, taskUnique, "alloc"),
MountsTaskDir: filepath.Join(d.clientAllocMountsDir, taskUnique),
MountsSecretsDir: filepath.Join(d.clientAllocMountsDir, taskUnique, "secrets"),
skip: set.From[string]([]string{d.clientAllocDir, d.clientAllocMountsDir}),
logger: d.logger.Named("task_dir").With("task_name", taskName),
MountsAllocDir: filepath.Join(a.clientAllocMountsDir, taskUnique, "alloc"),
MountsTaskDir: filepath.Join(a.clientAllocMountsDir, taskUnique),
MountsSecretsDir: filepath.Join(a.clientAllocMountsDir, taskUnique, "secrets"),
skip: set.From[string]([]string{a.clientAllocDir, a.clientAllocMountsDir}),
logger: a.logger.Named("task_dir").With("task_name", taskName),
secretsInMB: secretsInMB,
}
}
@@ -88,7 +88,7 @@ func (e *editor) Write(filename, content string) error {
return os.WriteFile(path, []byte(content), 0644)
}

// A Factory creates a Lifecycle which is an abstraction over the setup and
// Factory creates a Lifecycle which is an abstraction over the setup and
// teardown routines used for creating and destroying cgroups used for
// constraining Nomad tasks.
func Factory(allocID, task string, cores bool) Lifecycle {
@@ -106,7 +106,7 @@ func Factory(allocID, task string, cores bool) Lifecycle {
}
}

// A Lifecycle manages the lifecycle of the cgroup(s) of a task from the
// Lifecycle manages the lifecycle of the cgroup(s) of a task from the
// perspective of the Nomad client. That is, it creates and deletes the cgroups
// for a task, as well as provides last effort kill semantics for ensuring a
// process cannot stay alive beyond the intent of the client.
@@ -27,7 +27,8 @@ import (
"golang.org/x/sys/unix"
)

// Returns an object representing the current OS thread's network namespace
// GetCurrentNS returns an object representing the current OS thread's network
// namespace
func GetCurrentNS() (NetNS, error) {
// Lock the thread in case other goroutine executes in it and changes its
// network namespace after getCurrentThreadNetNSPath(), otherwise it might
@@ -140,7 +141,7 @@ func IsNSorErr(nspath string) error {
}
}

// Returns an object representing the namespace referred to by @path
// GetNS returns an object representing the namespace referred to by @path
func GetNS(nspath string) (NetNS, error) {
err := IsNSorErr(nspath)
if err != nil {
@@ -87,18 +87,18 @@ type Topology struct {
OverrideWitholdCompute hw.MHz
}

func (t *Topology) SetNodes(nodes *idset.Set[hw.NodeID]) {
t.nodeIDs = nodes
func (st *Topology) SetNodes(nodes *idset.Set[hw.NodeID]) {
st.nodeIDs = nodes
if !nodes.Empty() {
t.Nodes = nodes.Slice()
st.Nodes = nodes.Slice()
} else {
t.Nodes = []uint8{}
st.Nodes = []uint8{}
}
}

func (t *Topology) SetNodesFrom(nodes []uint8) {
t.nodeIDs = idset.From[hw.NodeID](nodes)
t.Nodes = nodes
func (st *Topology) SetNodesFrom(nodes []uint8) {
st.nodeIDs = idset.From[hw.NodeID](nodes)
st.Nodes = nodes
}

// A Core represents one logical (vCPU) core on a processor. Basically the slice
@@ -2473,8 +2473,8 @@ func (s *ServerConfig) Merge(b *ServerConfig) *ServerConfig {
}

// Merge is used to merge two client configs together
func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
result := *a
func (c *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
result := *c

if b.Enabled {
result.Enabled = true
@@ -2615,10 +2615,10 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
result.ServerJoin = result.ServerJoin.Merge(b.ServerJoin)
}

if len(a.HostVolumes) == 0 && len(b.HostVolumes) != 0 {
if len(c.HostVolumes) == 0 && len(b.HostVolumes) != 0 {
result.HostVolumes = structs.CopySliceClientHostVolumeConfig(b.HostVolumes)
} else if len(b.HostVolumes) != 0 {
result.HostVolumes = structs.HostVolumeSliceMerge(a.HostVolumes, b.HostVolumes)
result.HostVolumes = structs.HostVolumeSliceMerge(c.HostVolumes, b.HostVolumes)
}

if b.CNIPath != "" {
@@ -2640,7 +2640,7 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
result.BridgeNetworkHairpinMode = true
}

result.HostNetworks = a.HostNetworks
result.HostNetworks = c.HostNetworks

if len(b.HostNetworks) != 0 {
result.HostNetworks = append(result.HostNetworks, b.HostNetworks...)
@@ -2660,9 +2660,9 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
result.CgroupParent = b.CgroupParent
}

result.Artifact = a.Artifact.Merge(b.Artifact)
result.Drain = a.Drain.Merge(b.Drain)
result.Users = a.Users.Merge(b.Users)
result.Artifact = c.Artifact.Merge(b.Artifact)
result.Drain = c.Drain.Merge(b.Drain)
result.Users = c.Users.Merge(b.Users)

if b.NodeMaxAllocs != 0 {
result.NodeMaxAllocs = b.NodeMaxAllocs
@@ -2789,8 +2789,8 @@ func (t *Telemetry) Merge(b *Telemetry) *Telemetry {
}

// Merge is used to merge two port configurations.
func (a *Ports) Merge(b *Ports) *Ports {
result := *a
func (p *Ports) Merge(b *Ports) *Ports {
result := *p

if b.HTTP != 0 {
result.HTTP = b.HTTP
@@ -272,7 +272,7 @@ func (c *MockAgent) CheckRegs() []*api.AgentCheckRegistration {
return regs
}

// CheckRegister implements AgentAPI
// CheckRegisterOpts implements AgentAPI
func (c *MockAgent) CheckRegisterOpts(check *api.AgentCheckRegistration, _ *api.QueryOptions) error {
c.mu.Lock()
defer c.mu.Unlock()

@@ -168,7 +168,7 @@ type ACLsAPI interface {
// sidecar - Consul's view (agent, not catalog) of the service definition of the sidecar
// associated with existing that may or may not exist.
// May be nil.
func (s *ServiceClient) agentServiceUpdateRequired(reason syncReason, wanted *api.AgentServiceRegistration, existing *api.AgentService, sidecar *api.AgentService) bool {
func (c *ServiceClient) agentServiceUpdateRequired(reason syncReason, wanted *api.AgentServiceRegistration, existing *api.AgentService, sidecar *api.AgentService) bool {
switch reason {
case syncPeriodic:
// In a periodic sync with Consul, we need to respect the value of
@@ -188,7 +188,7 @@ func (s *ServiceClient) agentServiceUpdateRequired(reason syncReason, wanted *ap
maybeTweakTaggedAddresses(wanted, existing)

// Okay now it is safe to compare.
return s.different(wanted, existing, sidecar)
return c.different(wanted, existing, sidecar)

default:
// A non-periodic sync with Consul indicates an operation has been set
@@ -200,7 +200,7 @@ func (s *ServiceClient) agentServiceUpdateRequired(reason syncReason, wanted *ap
maybeTweakTaggedAddresses(wanted, existing)

// Okay now it is safe to compare.
return s.different(wanted, existing, sidecar)
return c.different(wanted, existing, sidecar)
}
}

@@ -245,9 +245,9 @@ func maybeTweakTaggedAddresses(wanted *api.AgentServiceRegistration, existing *a
// different compares the wanted state of the service registration with the actual
// (cached) state of the service registration reported by Consul. If any of the
// critical fields are not deeply equal, they considered different.
func (s *ServiceClient) different(wanted *api.AgentServiceRegistration, existing *api.AgentService, sidecar *api.AgentService) bool {
func (c *ServiceClient) different(wanted *api.AgentServiceRegistration, existing *api.AgentService, sidecar *api.AgentService) bool {
trace := func(field string, left, right any) {
s.logger.Trace("registrations different", "id", wanted.ID,
c.logger.Trace("registrations different", "id", wanted.ID,
"field", field, "wanted", fmt.Sprintf("%#v", left), "existing", fmt.Sprintf("%#v", right),
)
}
@@ -14,7 +14,7 @@ import (
"github.com/shirou/gopsutil/v3/process"
)

// List the process tree starting at the given executorPID
// ListByPid the process tree starting at the given executorPID
func ListByPid(executorPID int) set.Collection[ProcessID] {
result := set.New[ProcessID](10)
@@ -579,7 +579,7 @@ func Verbose(on bool) Option {
}
}

// Set an HCL variable.
// Var sets a HCL variable.
func Var(key, value string) Option {
return func(sub *Submission) {
sub.vars[key] = value
@@ -65,10 +65,10 @@ func (ns *Namespace) String() string {
return ns.Name
}

func (n *Names) setClient() {
func (g *Names) setClient() {
nomadClient, nomadErr := nomadapi.NewClient(nomadapi.DefaultConfig())
must.NoError(n.t, nomadErr, must.Sprint("failed to create nomad api client"))
n.nomadClient = nomadClient
must.NoError(g.t, nomadErr, must.Sprint("failed to create nomad api client"))
g.nomadClient = nomadClient
}

func configure(t *testing.T, opts ...Option) Cleanup {
@@ -117,7 +117,7 @@ func Create(t *testing.T, name string, opts ...Option) Cleanup {
return configure(t, append(opts, opt)...)
}

// Create namespaces of the given names.
// CreateN namespaces of the given names.
func CreateN(t *testing.T, names []string, opts ...Option) Cleanup {
creations := helper.ConvertSlice(names, func(name string) Option {
namespace := &Namespace{Name: name}
@@ -35,8 +35,8 @@ func (g *Group) Wait() {
g.wg.Wait()
}

// Wait for all goroutines to exit, or for the context to finish.
// Must be called after all calls to Go complete.
// WaitWithContext waits for all goroutines to exit, or for the context to
// finish. Must be called after all calls to Go complete.
func (g *Group) WaitWithContext(ctx context.Context) {
doneCh := make(chan struct{})
go func() {
@@ -67,6 +67,6 @@ func (h jobConsulHook) validateCluster(name string) error {

// Mutate ensures that the job's Consul cluster has been configured to be the
// default Consul cluster if unset
func (j jobConsulHook) Mutate(job *structs.Job) (*structs.Job, []error, error) {
return j.mutateImpl(job, structs.ConsulDefaultCluster), nil, nil
func (h jobConsulHook) Mutate(job *structs.Job) (*structs.Job, []error, error) {
return h.mutateImpl(job, structs.ConsulDefaultCluster), nil, nil
}

@@ -35,7 +35,7 @@ func (h jobVaultHook) validateClustersForNamespace(_ *structs.Job, blocks map[st
return nil
}

func (j jobVaultHook) Mutate(job *structs.Job) (*structs.Job, []error, error) {
func (h jobVaultHook) Mutate(job *structs.Job) (*structs.Job, []error, error) {
for _, tg := range job.TaskGroups {
for _, task := range tg.Tasks {
if task.Vault == nil || task.Vault.Cluster != "" {
@@ -98,7 +98,7 @@ func (t *TTLTimer) EmitMetrics(period time.Duration, shutdownCh chan struct{}) {
}
}

// timerNum returns the number of registered timers.
// TimerNum returns the number of registered timers.
func (t *TTLTimer) TimerNum() int {
t.lock.RLock()
defer t.lock.RUnlock()
@@ -104,7 +104,7 @@ func (op *Operator) RaftGetConfiguration(args *structs.GenericRequest, reply *st
return nil
}

// COMPAT(1.12.0): RaftRemovePeerByAddress was used to support Raft Protocol v2,
// RaftRemovePeerByAddress COMPAT(1.12.0) was used to support Raft Protocol v2,
// which was removed in Nomad 1.4.0 but the API was not removed. Remove this RPC
// entirely in Nomad 1.12.0.
func (op *Operator) RaftRemovePeerByAddress(_ *structs.RaftPeerByAddressRequest, _ *struct{}) error {
@@ -43,8 +43,8 @@ func (s *SentinelImport) Copy() *SentinelImport {
}

// Merge is used to merge two Sentinel configs together. The settings from the input always take precedence.
func (a *SentinelConfig) Merge(b *SentinelConfig) *SentinelConfig {
result := *a
func (s *SentinelConfig) Merge(b *SentinelConfig) *SentinelConfig {
result := *s
if len(b.Imports) > 0 {
result.Imports = append(result.Imports, b.Imports...)
}
@@ -48,20 +48,20 @@ type ContentSecurityPolicy struct {
}

// Copy returns a copy of this Vault UI config.
func (old *ContentSecurityPolicy) Copy() *ContentSecurityPolicy {
if old == nil {
func (csp *ContentSecurityPolicy) Copy() *ContentSecurityPolicy {
if csp == nil {
return nil
}

nc := new(ContentSecurityPolicy)
*nc = *old
nc.ConnectSrc = slices.Clone(old.ConnectSrc)
nc.DefaultSrc = slices.Clone(old.DefaultSrc)
nc.FormAction = slices.Clone(old.FormAction)
nc.FrameAncestors = slices.Clone(old.FrameAncestors)
nc.ImgSrc = slices.Clone(old.ImgSrc)
nc.ScriptSrc = slices.Clone(old.ScriptSrc)
nc.StyleSrc = slices.Clone(old.StyleSrc)
*nc = *csp
nc.ConnectSrc = slices.Clone(csp.ConnectSrc)
nc.DefaultSrc = slices.Clone(csp.DefaultSrc)
nc.FormAction = slices.Clone(csp.FormAction)
nc.FrameAncestors = slices.Clone(csp.FrameAncestors)
nc.ImgSrc = slices.Clone(csp.ImgSrc)
nc.ScriptSrc = slices.Clone(csp.ScriptSrc)
nc.StyleSrc = slices.Clone(csp.StyleSrc)
return nc
}
@@ -24,7 +24,8 @@ type DeviceAccounterInstance struct {
Instances map[string]int
}

// Locality returns the NodeDeviceLocality of the instance of the specific deviceID.
// GetLocality returns the NodeDeviceLocality of the instance of the specific
// deviceID.
//
// If no instance matching the deviceID is found, nil is returned.
func (dai *DeviceAccounterInstance) GetLocality(instanceID string) *NodeDeviceLocality {
@@ -163,9 +164,9 @@ func (d *DeviceAccounter) AddReserved(res *AllocatedDeviceResource) (collision b
}

// FreeCount returns the number of free device instances
func (i *DeviceAccounterInstance) FreeCount() int {
func (dai *DeviceAccounterInstance) FreeCount() int {
count := 0
for _, c := range i.Instances {
for _, c := range dai.Instances {
if c == 0 {
count++
}
@@ -2827,23 +2827,23 @@ func portDiffs(old, new []Port, dynamic bool, contextual bool) []*ObjectDiff {

}

func (r *NUMA) Diff(other *NUMA, contextual bool) *ObjectDiff {
if r.Equal(other) {
func (n *NUMA) Diff(other *NUMA, contextual bool) *ObjectDiff {
if n.Equal(other) {
return nil
}

diff := &ObjectDiff{Type: DiffTypeNone, Name: "NUMA"}
var oldPrimitiveFlat, newPrimitiveFlat map[string]string

if r == nil {
if n == nil {
diff.Type = DiffTypeAdded
newPrimitiveFlat = flatmap.Flatten(other, nil, true)
} else if other == nil {
diff.Type = DiffTypeDeleted
oldPrimitiveFlat = flatmap.Flatten(r, nil, true)
oldPrimitiveFlat = flatmap.Flatten(n, nil, true)
} else {
diff.Type = DiffTypeEdited
oldPrimitiveFlat = flatmap.Flatten(r, nil, true)
oldPrimitiveFlat = flatmap.Flatten(n, nil, true)
newPrimitiveFlat = flatmap.Flatten(other, nil, true)
}
diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual)
@@ -88,7 +88,7 @@ func (k *UnwrappedRootKey) Copy() *UnwrappedRootKey {
}
}

// MakeInactive returns a copy of the RootKey with the meta state set to active
// MakeActive returns a copy of the RootKey with the meta state set to active
func (k *UnwrappedRootKey) MakeActive() *UnwrappedRootKey {
meta := k.Meta.Copy()
meta.State = RootKeyStateActive
@@ -118,7 +118,7 @@ func (r LegacyNodeCpuResources) empty() bool {
return r.CpuShares == 0 || r.TotalCpuCores == 0
}

// NomadProcessorResources captures the CPU hardware resources of the Nomad node.
// NodeProcessorResources captures the CPU hardware resources of the Nomad node.
//
// In Nomad enterprise this structure is used to map tasks to NUMA nodes.
type NodeProcessorResources struct {
@@ -126,8 +126,9 @@ type NodeProcessorResources struct {
Topology *numalib.Topology // do not modify
}

// partial struct serialization / copy / merge sadness means this struct can
// exist with no data, which is a condition we must detect during the upgrade path
// Empty handles partial struct serialization / copy / merge sadness that means
// this struct can exist with no data, which is a condition we must detect
// during the upgrade path.
func (r NodeProcessorResources) Empty() bool {
return r.Topology == nil || len(r.Topology.Cores) == 0
}
@@ -391,8 +391,8 @@ func (vd VariableDecrypted) Validate() error {
return nil
}

// A new variable can be crated just to support a lock, it doesn't require to hold
// any items and it will validate the lock.
// ValidateForLock ensures a new variable can be created just to support a lock,
// it doesn't require to hold any items and it will validate the lock.
func (vd VariableDecrypted) ValidateForLock() error {
var mErr multierror.Error
if vd.Namespace == AllNamespacesSentinel {
@@ -182,7 +182,7 @@ func (w *Worker) Resume() {
}
}

// Resume transitions a worker to the stopping state. Check
// Stop transitions a worker to the stopping state. Check
// to see if the worker stopped by calling IsStopped()
func (w *Worker) Stop() {
w.setStatus(WorkerStopping)
@@ -250,7 +250,7 @@ func (w *Worker) setWorkerStatusLocked(newStatus WorkerStatus) {
w.status = newStatus
}

// GetStatus returns the status of the Worker's Workload.
// GetWorkloadStatus returns the status of the Worker's Workload.
func (w *Worker) GetWorkloadStatus() SchedulerWorkerStatus {
w.statusLock.RLock()
defer w.statusLock.RUnlock()
@@ -578,7 +578,7 @@ type ErrMinIndexDeadlineExceeded struct {
timeout time.Duration
}

// Unwrapping an ErrMinIndexDeadlineExceeded always return
// Unwrap an ErrMinIndexDeadlineExceeded that always returns
// context.DeadlineExceeded
func (ErrMinIndexDeadlineExceeded) Unwrap() error {
return context.DeadlineExceeded
@@ -84,16 +84,16 @@ type ClientDriverConfig struct {
Topology *numalib.Topology
}

func (c *AgentConfig) toProto() *proto.NomadConfig {
if c == nil {
func (ac *AgentConfig) toProto() *proto.NomadConfig {
if ac == nil {
return nil
}
cfg := &proto.NomadConfig{}
if c.Driver != nil {
if ac.Driver != nil {
cfg.Driver = &proto.NomadDriverConfig{
ClientMaxPort: uint32(c.Driver.ClientMaxPort),
ClientMinPort: uint32(c.Driver.ClientMinPort),
Topology: nomadTopologyToProto(c.Driver.Topology),
ClientMaxPort: uint32(ac.Driver.ClientMaxPort),
ClientMinPort: uint32(ac.Driver.ClientMinPort),
Topology: nomadTopologyToProto(ac.Driver.Topology),
}
}
return cfg
@@ -33,9 +33,9 @@ func newDeviceAllocator(ctx Context, n *structs.Node) *deviceAllocator {
}
}

func (da *deviceAllocator) Copy() *deviceAllocator {
accounter := da.DeviceAccounter.Copy()
allocator := &deviceAllocator{accounter, da.ctx}
func (d *deviceAllocator) Copy() *deviceAllocator {
accounter := d.DeviceAccounter.Copy()
allocator := &deviceAllocator{accounter, d.ctx}
return allocator
}
@@ -267,9 +267,9 @@ func (set allocSet) filterByTainted(state ClusterState) (untainted, migrate, los

// filterOutByClientStatus returns a new allocSet containing allocs that don't
// have the specified client status
func (a allocSet) filterOutByClientStatus(clientStatuses ...string) allocSet {
func (set allocSet) filterOutByClientStatus(clientStatuses ...string) allocSet {
allocs := make(allocSet)
for _, alloc := range a {
for _, alloc := range set {
if !slices.Contains(clientStatuses, alloc.ClientStatus) {
allocs[alloc.ID] = alloc
}
@@ -280,9 +280,9 @@ func (a allocSet) filterOutByClientStatus(clientStatuses ...string) allocSet {

// filterByClientStatus returns a new allocSet containing allocs that have the
// specified client status
func (a allocSet) filterByClientStatus(clientStatus string) allocSet {
func (set allocSet) filterByClientStatus(clientStatus string) allocSet {
allocs := make(allocSet)
for _, alloc := range a {
for _, alloc := range set {
if alloc.ClientStatus == clientStatus {
allocs[alloc.ID] = alloc
}
@@ -450,9 +450,9 @@ func updateByReschedulable(alloc *structs.Allocation, now time.Time, evalID stri

// delayByStopAfter returns a delay for any lost allocation that's got a
// disconnect.stop_on_client_after configured
func (a allocSet) delayByStopAfter() (later []*delayedRescheduleInfo) {
func (set allocSet) delayByStopAfter() (later []*delayedRescheduleInfo) {
now := time.Now().UTC()
for _, a := range a {
for _, a := range set {
if !a.ShouldClientStop() {
continue
}
@@ -472,10 +472,10 @@ func (a allocSet) delayByStopAfter() (later []*delayedRescheduleInfo) {

// delayByLostAfter returns a delay for any unknown allocation
// that has disconnect.lost_after configured
func (a allocSet) delayByLostAfter(now time.Time) ([]*delayedRescheduleInfo, error) {
func (set allocSet) delayByLostAfter(now time.Time) ([]*delayedRescheduleInfo, error) {
var later []*delayedRescheduleInfo

for _, alloc := range a {
for _, alloc := range set {
timeout := alloc.DisconnectTimeout(now)
if !timeout.After(now) {
return nil, errors.New("unable to computing disconnecting timeouts")
@@ -316,12 +316,14 @@ func (h *Harness) SetNoSubmit() {
h.noSubmit = true
}

// helper method to create allocations with given jobs and resources
// CreateAlloc is helper method to create allocations with given jobs and
// resources
func CreateAlloc(id string, job *structs.Job, resource *structs.Resources) *structs.Allocation {
return CreateAllocInner(id, job, resource, nil, nil)
}

// helper method to create allocation with network at the task group level
// CreateAllocWithTaskgroupNetwork is is helper method to create allocation with
// network at the task group level
func CreateAllocWithTaskgroupNetwork(id string, job *structs.Job, resource *structs.Resources, tgNet *structs.NetworkResource) *structs.Allocation {
return CreateAllocInner(id, job, resource, nil, tgNet)
}
@@ -13,7 +13,8 @@ import (
"github.com/shoenig/test/must"
)

// Assert CA file exists and is a valid CA Returns the CA
// IsValidCertificate asserts the CA file exists and is a valid CA Returns the
// CA
func IsValidCertificate(t *testing.T, caPath string) *x509.Certificate {
t.Helper()

@@ -28,7 +29,8 @@ func IsValidCertificate(t *testing.T, caPath string) *x509.Certificate {
return ca
}

// Assert key file exists and is a valid signer returns a bool
// IsValidSigner asserts the key file exists and is a valid signer returns a
// bool
func IsValidSigner(t *testing.T, keyPath string) bool {
t.Helper()
@@ -335,7 +335,7 @@ func WaitForRunning(t testing.TB, rpc rpcFn, job *structs.Job) []*structs.AllocL
return WaitForRunningWithToken(t, rpc, job, "")
}

// WaitforJobAllocStatus blocks until the ClientStatus of allocations for a job
// WaitForJobAllocStatus blocks until the ClientStatus of allocations for a job
// match the expected map of <ClientStatus>: <count>.
func WaitForJobAllocStatus(t testing.TB, rpc rpcFn, job *structs.Job, allocStatus map[string]int) {
t.Helper()
@@ -387,7 +387,7 @@ func WaitForJobAllocStatusWithToken(t testing.TB, rpc rpcFn, job *structs.Job, a
return allocs
}

// WaitforJobEvalStatus blocks until the job's evals match the status described
// WaitForJobEvalStatus blocks until the job's evals match the status described
// in the map of <Eval.Status>: <count>.
func WaitForJobEvalStatus(t testing.TB, rpc rpcFn, job *structs.Job, evalStatus map[string]int) []*structs.Evaluation {
return WaitForJobEvalStatusWithToken(t, rpc, job, evalStatus, "")
@@ -71,38 +71,38 @@ func GetVersion() *VersionInfo {
}
}

func (c *VersionInfo) VersionNumber() string {
version := c.Version
func (v *VersionInfo) VersionNumber() string {
version := v.Version

if c.VersionPrerelease != "" {
version = fmt.Sprintf("%s-%s", version, c.VersionPrerelease)
if v.VersionPrerelease != "" {
version = fmt.Sprintf("%s-%s", version, v.VersionPrerelease)
}

if c.VersionMetadata != "" {
version = fmt.Sprintf("%s+%s", version, c.VersionMetadata)
if v.VersionMetadata != "" {
version = fmt.Sprintf("%s+%s", version, v.VersionMetadata)
}

return version
}

func (c *VersionInfo) FullVersionNumber(rev bool) string {
func (v *VersionInfo) FullVersionNumber(rev bool) string {
var versionString bytes.Buffer

fmt.Fprintf(&versionString, "Nomad v%s", c.Version)
if c.VersionPrerelease != "" {
fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease)
fmt.Fprintf(&versionString, "Nomad v%s", v.Version)
if v.VersionPrerelease != "" {
fmt.Fprintf(&versionString, "-%s", v.VersionPrerelease)
}

if c.VersionMetadata != "" {
fmt.Fprintf(&versionString, "+%s", c.VersionMetadata)
if v.VersionMetadata != "" {
fmt.Fprintf(&versionString, "+%s", v.VersionMetadata)
}

if !c.BuildDate.IsZero() {
fmt.Fprintf(&versionString, "\nBuildDate %s", c.BuildDate.Format(time.RFC3339))
if !v.BuildDate.IsZero() {
fmt.Fprintf(&versionString, "\nBuildDate %s", v.BuildDate.Format(time.RFC3339))
}

if rev && c.Revision != "" {
fmt.Fprintf(&versionString, "\nRevision %s", c.Revision)
if rev && v.Revision != "" {
fmt.Fprintf(&versionString, "\nRevision %s", v.Revision)
}

return versionString.String()