Merge pull request #11098 from hashicorp/b-fixup-all-incorrect-docstrings
chore: fix incorrect docstring formatting.
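For context on what every hunk below is doing: Go's convention (checked by golint and assumed by godoc) is that the doc comment on an exported identifier should be a complete sentence beginning with that identifier's own name. A minimal sketch with a hypothetical package and function, not taken from this commit:

package docstyle

// Sum adds the two operands together.
//
// This is the form the commit applies throughout: the comment leads with the
// identifier it documents. The pre-commit form ("// Adds the two operands
// together.") is what golint flags, reporting roughly:
//
//	comment on exported function Sum should be of the form "Sum ..."
func Sum(a, b int) int { return a + b }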
@@ -276,7 +276,7 @@ func (d *AllocDir) Move(other *AllocDir, tasks []*structs.Task) error {
 	return nil
 }
 
-// Tears down previously build directory structure.
+// Destroy tears down previously build directory structure.
 func (d *AllocDir) Destroy() error {
 	// Unmount all mounted shared alloc dirs.
 	var mErr multierror.Error
@@ -435,9 +435,8 @@ func (tr *TaskRunner) initLabels() {
 	}
 }
 
-// Mark a task as failed and not to run. Aimed to be invoked when alloc runner
-// prestart hooks failed.
-// Should never be called with Run().
+// MarkFailedDead marks a task as failed and not to run. Aimed to be invoked
+// when alloc runner prestart hooks failed. Should never be called with Run().
 func (tr *TaskRunner) MarkFailedDead(reason string) {
 	defer close(tr.waitCh)
 
@@ -904,7 +904,7 @@ func (c *Client) GetAllocStats(allocID string) (interfaces.AllocStatsReporter, e
 	return ar.StatsReporter(), nil
 }
 
-// HostStats returns all the stats related to a Nomad client
+// LatestHostStats returns all the stats related to a Nomad client.
 func (c *Client) LatestHostStats() *stats.HostStats {
 	return c.hostStatsCollector.Stats()
 }
@@ -441,8 +441,8 @@ func (c *Config) ReadStringListToMap(keys ...string) map[string]struct{} {
 	return splitValue(val)
 }
 
-// ReadStringListToMap tries to parse the specified option as a comma separated list.
-// If there is an error in parsing, an empty list is returned.
+// ReadStringListToMapDefault tries to parse the specified option as a comma
+// separated list. If there is an error in parsing, an empty list is returned.
 func (c *Config) ReadStringListToMapDefault(key, defaultValue string) map[string]struct{} {
 	return c.ReadStringListAlternativeToMapDefault([]string{key}, defaultValue)
 }
@@ -30,7 +30,7 @@ type MountPointDetector interface {
 type DefaultMountPointDetector struct {
 }
 
-// Call out to the default cgroup library
+// MountPoint calls out to the default cgroup library.
 func (b *DefaultMountPointDetector) MountPoint() (string, error) {
 	return cgutil.FindCgroupMountpointDir()
 }
@@ -103,7 +103,7 @@ func (a *AllocGarbageCollector) Run() {
 	}
 }
 
-// Force the garbage collector to run.
+// Trigger forces the garbage collector to run.
 func (a *AllocGarbageCollector) Trigger() {
 	select {
 	case a.triggerCh <- struct{}{}:
@@ -45,8 +45,8 @@ func (m *PluginGroup) RegisterAndRun(manager PluginManager) error {
 	return nil
 }
 
-// Ready returns a channel which will be closed once all plugin managers are ready.
-// A timeout for waiting on each manager is given
+// WaitForFirstFingerprint returns a channel which will be closed once all
+// plugin managers are ready. A timeout for waiting on each manager is given
 func (m *PluginGroup) WaitForFirstFingerprint(ctx context.Context) (<-chan struct{}, error) {
 	m.mLock.Lock()
 	defer m.mLock.Unlock()
@@ -112,7 +112,8 @@ func mergeWriteOptions(opts []WriteOption) WriteOptions {
 	return writeOptions
 }
 
-// Enable Batch mode for write requests (Put* and Delete* operations above).
+// WithBatchMode enables Batch mode for write requests (Put* and Delete*
+// operations above).
 func WithBatchMode() WriteOption {
 	return func(s *WriteOptions) {
 		s.BatchMode = true
@@ -319,7 +319,7 @@ type networkStatusEntry struct {
 	NetworkStatus *structs.AllocNetworkStatus
 }
 
-// PutDeploymentStatus stores an allocation's DeploymentStatus or returns an
+// PutNetworkStatus stores an allocation's DeploymentStatus or returns an
 // error.
 func (s *BoltStateDB) PutNetworkStatus(allocID string, ds *structs.AllocNetworkStatus, opts ...WriteOption) error {
 	return s.updateWithOptions(opts, func(tx *boltdd.Tx) error {
@@ -73,7 +73,7 @@ func backupDB(bdb *bolt.DB, dst string) error {
 	})
 }
 
-// UpgradeSchema upgrades the boltdb schema. Example 0.8 schema:
+// UpgradeAllocs upgrades the boltdb schema. Example 0.8 schema:
 //
 // * allocations
 // * 15d83e8a-74a2-b4da-3f17-ed5c12895ea8
@@ -620,7 +620,7 @@ func (b *Builder) Build() *TaskEnv {
 	return NewTaskEnv(envMap, envMapClient, deviceEnvs, nodeAttrs, b.clientTaskRoot, b.clientSharedAllocDir)
 }
 
-// Update task updates the environment based on a new alloc and task.
+// UpdateTask updates the environment based on a new alloc and task.
 func (b *Builder) UpdateTask(alloc *structs.Allocation, task *structs.Task) *Builder {
 	b.mu.Lock()
 	defer b.mu.Unlock()
@@ -402,7 +402,7 @@ func (c *Command) isValidConfig(config, cmdConfig *Config) bool {
 	return true
 }
 
-// setupLoggers is used to setup the logGate, and our logOutput
+// SetupLoggers is used to set up the logGate, and our logOutput
 func SetupLoggers(ui cli.Ui, config *Config) (*logutils.LevelFilter, *gatedwriter.Writer, io.Writer) {
 	// Setup logging. First create the gated log writer, which will
 	// store logs until we're ready to show them. Then create the level
@@ -386,7 +386,7 @@ type JobGetter struct {
 	testStdin io.Reader
 }
 
-// StructJob returns the Job struct from jobfile.
+// ApiJob returns the Job struct from jobfile.
 func (j *JobGetter) ApiJob(jpath string) (*api.Job, error) {
 	return j.ApiJobWithArgs(jpath, nil, nil)
 }
@@ -987,8 +987,9 @@ func (c *OperatorDebugCommand) trap() {
 	}()
 }
 
-// TarCZF, like the tar command, recursively builds a gzip compressed tar archive from a
-// directory. If not empty, all files in the bundle are prefixed with the target path
+// TarCZF like the tar command, recursively builds a gzip compressed tar
+// archive from a directory. If not empty, all files in the bundle are prefixed
+// with the target path.
 func TarCZF(archive string, src, target string) error {
 	// ensure the src actually exists before trying to tar it
 	if _, err := os.Stat(src); err != nil {
@@ -505,8 +505,8 @@ var (
 	noSuchProcessErr = "no such process"
 )
 
-// Exit cleans up the alloc directory, destroys resource container and kills the
-// user process
+// Shutdown cleans up the alloc directory, destroys resource container and
+// kills the user process.
 func (e *UniversalExecutor) Shutdown(signal string, grace time.Duration) error {
 	e.logger.Debug("shutdown requested", "signal", signal, "grace_period_ms", grace.Round(time.Millisecond))
 	var merr multierror.Error
@@ -291,7 +291,7 @@ func (tc *ConsulTemplateTest) TestTemplatePathInterpolation_Bad(f *framework.F)
 	f.True(found, "alloc failed but NOT due to expected source path escape error")
 }
 
-// TestTemplatePathInterpolation_SharedAlloc asserts that NOMAD_ALLOC_DIR
+// TestTemplatePathInterpolation_SharedAllocDir asserts that NOMAD_ALLOC_DIR
 // is supported as a destination for artifact and template blocks, and
 // that it is properly interpolated for task drivers with varying
 // filesystem isolation
@@ -89,8 +89,8 @@ func JobInspectTemplate(jobID, template string) (string, error) {
 	return outStr, nil
 }
 
-// Register registers a jobspec from a string, also with a unique ID.
-// The caller is responsible for recording that ID for later cleanup.
+// RegisterFromJobspec registers a jobspec from a string, also with a unique
+// ID. The caller is responsible for recording that ID for later cleanup.
 func RegisterFromJobspec(jobID, jobspec string) error {
 
 	cmd := exec.Command("nomad", "job", "run", "-detach", "-")
@@ -10,7 +10,7 @@ type WaitConfig struct {
 	Retries int64
 }
 
-// Return a default wait config of 10s
+// OrDefault returns a default wait config of 10s.
 func (wc *WaitConfig) OrDefault() (time.Duration, int64) {
 	if wc == nil {
 		return time.Millisecond * 100, 100
@@ -25,8 +25,8 @@ func init() {
 	})
 }
 
-// Ensure cluster has leader and at least 1 client node
-// in a ready state before running tests
+// BeforeAll ensures the cluster has leader and at least 1 client node in a
+// ready state before running tests.
 func (tc *LifecycleE2ETest) BeforeAll(f *framework.F) {
 	e2eutil.WaitForLeader(f.T(), tc.Nomad())
 	e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 1)
@@ -34,9 +34,9 @@ func init() {
 	})
 }
 
-// Stand up prometheus to collect metrics from all clients and allocs,
-// with fabio as a system job in front of it so that we don't need to
-// have prometheus use host networking
+// BeforeAll stands up Prometheus to collect metrics from all clients and
+// allocs, with fabio as a system job in front of it so that we don't need to
+// have prometheus use host networking.
 func (tc *MetricsTest) BeforeAll(f *framework.F) {
 	t := f.T()
 	e2eutil.WaitForLeader(t, tc.Nomad())
@@ -45,8 +45,8 @@ func (tc *MetricsTest) BeforeAll(f *framework.F) {
 	require.Nil(t, err)
 }
 
-// Clean up the target jobs after each test case, but keep fabio/prometheus
-// for reuse between the two test cases (Windows vs Linux)
+// AfterEach CleanS up the target jobs after each test case, but keep
+// fabio/prometheus for reuse between the two test cases (Windows vs Linux).
 func (tc *MetricsTest) AfterEach(f *framework.F) {
 	if os.Getenv("NOMAD_TEST_SKIPCLEANUP") == "1" {
 		return
@@ -58,7 +58,7 @@ func (tc *MetricsTest) AfterEach(f *framework.F) {
 	tc.Nomad().System().GarbageCollect()
 }
 
-// Clean up fabio/prometheus
+// AfterAll cleans up fabio/prometheus.
 func (tc *MetricsTest) AfterAll(f *framework.F) {
 	if os.Getenv("NOMAD_TEST_SKIPCLEANUP") == "1" {
 		return
@@ -287,8 +287,8 @@ func (tc *NodeDrainE2ETest) TestNodeDrainDeadline(f *framework.F) {
 	), "node did not drain immediately following deadline")
 }
 
-// TestNodeDrainDeadline tests the enforcement of the node drain -force flag
-// so that allocations are terminated immediately.
+// TestNodeDrainForce tests the enforcement of the node drain -force flag so
+// that allocations are terminated immediately.
 func (tc *NodeDrainE2ETest) TestNodeDrainForce(f *framework.F) {
 	f.T().Skip("The behavior is unclear and test assertions don't capture intent. Issue 9902")
 
@@ -239,8 +239,8 @@ func (tc *RescheduleE2ETest) TestRescheduleWithCanary(f *framework.F) {
 		"deployment should be running")
 }
 
-// TestRescheduleWithCanary updates a running job to fail, and verifies that
-// the job gets reverted
+// TestRescheduleWithCanaryAutoRevert updates a running job to fail, and
+// verifies that the job gets reverted.
 func (tc *RescheduleE2ETest) TestRescheduleWithCanaryAutoRevert(f *framework.F) {
 
 	jobID := "test-reschedule-canary-revert-" + uuid.Generate()[0:8]
@@ -81,7 +81,7 @@ func (cs Constraints) Check(v *version.Version) bool {
 	return true
 }
 
-// Returns the string format of the constraints
+// String returns the string format of the constraints.
 func (cs Constraints) String() string {
 	csStr := make([]string, len(cs))
 	for i, c := range cs {
@@ -9,8 +9,8 @@ import (
 	"strings"
 )
 
-// Checks the current executable, then $GOPATH/bin, and finally the CWD, in that
-// order. If it can't be found, an error is returned.
+// NomadExecutable checks the current executable, then $GOPATH/bin, and finally
+// the CWD, in that order. If it can't be found, an error is returned.
 func NomadExecutable() (string, error) {
 	nomadExe := "nomad"
 	if runtime.GOOS == "windows" {
@@ -68,7 +68,7 @@ func HashUUID(input string) (output string, hashed bool) {
 	return output, true
 }
 
-// boolToPtr returns the pointer to a boolean
+// BoolToPtr returns the pointer to a boolean.
 func BoolToPtr(b bool) *bool {
 	return &b
 }
@@ -278,7 +278,8 @@ func CompareMapStringString(a, b map[string]string) bool {
 	return true
 }
 
-// Helpers for copying generic structures.
+// Below is helpers for copying generic structures.
+
 func CopyMapStringString(m map[string]string) map[string]string {
 	l := len(m)
 	if l == 0 {
@@ -60,7 +60,7 @@ func Init() error {
 	return initErr
 }
 
-// CPUModelName returns the number of CPU cores available
+// CPUNumCores returns the number of CPU cores available
 func CPUNumCores() int {
 	return cpuNumCores
 }
@@ -298,7 +298,7 @@ func (c *Config) OutgoingTLSWrapper() (RegionWrapper, error) {
 
 }
 
-// Wrap a net.Conn into a client tls connection, performing any
+// WrapTLSClient wraps a net.Conn into a client tls connection, performing any
 // additional verification as needed.
 //
 // As of go 1.3, crypto/tls only supports either doing no certificate
@@ -18,7 +18,7 @@ import (
 // meta { ... }
 // }
 // ```
-
+//
 // to
 //
 // ```
@@ -51,9 +51,9 @@ func (pq *ScoreHeap) Push(x interface{}) {
 	}
 }
 
-// Push implements heap.Interface and returns the top K scoring
-// elements in increasing order of Score. Callers must reverse the order
-// of returned elements to get the top K scoring elements in descending order
+// Pop implements heap.Interface and returns the top K scoring elements in
+// increasing order of Score. Callers must reverse the order of returned
+// elements to get the top K scoring elements in descending order.
 func (pq *ScoreHeap) Pop() interface{} {
 	old := pq.items
 	n := len(old)
@@ -179,7 +179,7 @@ func (e *Eval) Ack(args *structs.EvalAckRequest,
 	return nil
 }
 
-// NAck is used to negative acknowledge completion of a dequeued evaluation
+// Nack is used to negative acknowledge completion of a dequeued evaluation.
 func (e *Eval) Nack(args *structs.EvalAckRequest,
 	reply *structs.GenericResponse) error {
 	if done, err := e.srv.forward("Eval.Nack", args, args, reply); done {
@@ -137,7 +137,7 @@ type FSMConfig struct {
 	EventBufferSize int64
 }
 
-// NewFSMPath is used to construct a new FSM with a blank state
+// NewFSM is used to construct a new FSM with a blank state.
 func NewFSM(config *FSMConfig) (*nomadFSM, error) {
 	// Create a state store
 	sconfig := &state.StateStoreConfig{
@@ -1349,7 +1349,7 @@ func Alloc() *structs.Allocation {
 	return alloc
 }
 
-// ConnectJob adds a Connect proxy sidecar group service to mock.Alloc.
+// ConnectAlloc adds a Connect proxy sidecar group service to mock.Alloc.
 func ConnectAlloc() *structs.Allocation {
 	alloc := Alloc()
 	alloc.Job = ConnectJob()
@@ -264,7 +264,7 @@ func (s *StateStore) Abandon() {
 	close(s.abandonCh)
 }
 
-// StopStopEventBroker calls the cancel func for the state stores event
+// StopEventBroker calls the cancel func for the state stores event
 // publisher. It should be called during server shutdown.
 func (s *StateStore) StopEventBroker() {
 	s.stopEventBroker()
@@ -2051,7 +2051,7 @@ func (s *StateStore) JobsByGC(ws memdb.WatchSet, gc bool) (memdb.ResultIterator,
 	return iter, nil
 }
 
-// JobSummary returns a job summary object which matches a specific id.
+// JobSummaryByID returns a job summary object which matches a specific id.
 func (s *StateStore) JobSummaryByID(ws memdb.WatchSet, namespace, jobID string) (*structs.JobSummary, error) {
 	txn := s.db.ReadTxn()
 
@@ -2197,8 +2197,8 @@ func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, namespace, id string) (*st
 	return s.CSIVolumeDenormalizePluginsTxn(txn, vol.Copy())
 }
 
-// CSIVolumes looks up csi_volumes by pluginID. Caller should snapshot if it
-// wants to also denormalize the plugins.
+// CSIVolumesByPluginID looks up csi_volumes by pluginID. Caller should
+// snapshot if it wants to also denormalize the plugins.
 func (s *StateStore) CSIVolumesByPluginID(ws memdb.WatchSet, namespace, prefix, pluginID string) (memdb.ResultIterator, error) {
 	txn := s.db.ReadTxn()
 
@@ -2811,8 +2811,8 @@ func (s *StateStore) UpsertEvals(msgType structs.MessageType, index uint64, eval
 	return err
 }
 
-// UpsertEvals is used to upsert a set of evaluations, like UpsertEvals
-// but in a transaction. Useful for when making multiple modifications atomically
+// UpsertEvalsTxn is used to upsert a set of evaluations, like UpsertEvals but
+// in a transaction. Useful for when making multiple modifications atomically.
 func (s *StateStore) UpsertEvalsTxn(index uint64, evals []*structs.Evaluation, txn Txn) error {
 	// Do a nested upsert
 	jobs := make(map[structs.NamespacedID]string, len(evals))
@@ -3478,7 +3478,7 @@ func allocNamespaceFilter(namespace string) func(interface{}) bool {
 	}
 }
 
-// AllocsByIDPrefix is used to lookup allocs by prefix
+// AllocsByIDPrefixAllNSs is used to lookup allocs by prefix.
 func (s *StateStore) AllocsByIDPrefixAllNSs(ws memdb.WatchSet, prefix string) (memdb.ResultIterator, error) {
 	txn := s.db.ReadTxn()
 
@@ -3520,7 +3520,8 @@ func allocsByNodeTxn(txn ReadTxn, ws memdb.WatchSet, node string) ([]*structs.Al
 	return out, nil
 }
 
-// AllocsByNode returns all the allocations by node and terminal status
+// AllocsByNodeTerminal returns all the allocations by node and terminal
+// status.
 func (s *StateStore) AllocsByNodeTerminal(ws memdb.WatchSet, node string, terminal bool) ([]*structs.Allocation, error) {
 	txn := s.db.ReadTxn()
 
@@ -3666,7 +3667,7 @@ func (s *StateStore) allocsByNamespaceImpl(ws memdb.WatchSet, txn *txn, namespac
 	return iter, nil
 }
 
-// UpsertVaultAccessors is used to register a set of Vault Accessors
+// UpsertVaultAccessor is used to register a set of Vault Accessors.
 func (s *StateStore) UpsertVaultAccessor(index uint64, accessors []*structs.VaultAccessor) error {
 	txn := s.db.WriteTxn(index)
 	defer txn.Abort()
@@ -4258,7 +4259,7 @@ func (s *StateStore) UpdateDeploymentAllocHealth(msgType structs.MessageType, in
 	return txn.Commit()
 }
 
-// LastIndex returns the greatest index value for all indexes
+// LatestIndex returns the greatest index value for all indexes.
 func (s *StateStore) LatestIndex() (uint64, error) {
 	indexes, err := s.Indexes()
 	if err != nil {
@@ -5369,7 +5370,7 @@ func (s *StateStore) CanBootstrapACLToken() (bool, uint64, error) {
 	return false, out.(*IndexEntry).Value, nil
 }
 
-// BootstrapACLToken is used to create an initial ACL token
+// BootstrapACLTokens is used to create an initial ACL token.
 func (s *StateStore) BootstrapACLTokens(msgType structs.MessageType, index uint64, resetIndex uint64, token *structs.ACLToken) error {
 	txn := s.db.WriteTxnMsgT(msgType, index)
 	defer txn.Abort()
@@ -5681,7 +5682,7 @@ func (s *StateStore) setClusterMetadata(txn *txn, meta *structs.ClusterMetadata)
 	return nil
 }
 
-// UpsertScalingPolicy is used to insert a new scaling policy.
+// UpsertScalingPolicies is used to insert a new scaling policy.
 func (s *StateStore) UpsertScalingPolicies(index uint64, scalingPolicies []*structs.ScalingPolicy) error {
 	txn := s.db.WriteTxn(index)
 	defer txn.Abort()
@@ -5693,7 +5694,7 @@ func (s *StateStore) UpsertScalingPolicies(index uint64, scalingPolicies []*stru
 	return txn.Commit()
 }
 
-// upsertScalingPolicy is used to insert a new scaling policy.
+// UpsertScalingPoliciesTxn is used to insert a new scaling policy.
 func (s *StateStore) UpsertScalingPoliciesTxn(index uint64, scalingPolicies []*structs.ScalingPolicy,
 	txn *txn) error {
 
@@ -5831,7 +5832,7 @@ func (s *StateStore) NamespaceNames() ([]string, error) {
 	return nses, nil
 }
 
-// UpsertNamespace is used to register or update a set of namespaces
+// UpsertNamespaces is used to register or update a set of namespaces.
 func (s *StateStore) UpsertNamespaces(index uint64, namespaces []*structs.Namespace) error {
 	txn := s.db.WriteTxn(index)
 	defer txn.Abort()
@@ -5962,7 +5963,7 @@ func (s *StateStore) DeleteScalingPolicies(index uint64, ids []string) error {
	return err
 }
 
-// DeleteScalingPolicies is used to delete a set of scaling policies by ID
+// DeleteScalingPoliciesTxn is used to delete a set of scaling policies by ID.
 func (s *StateStore) DeleteScalingPoliciesTxn(index uint64, ids []string, txn *txn) error {
 	if len(ids) == 0 {
 		return nil
@@ -113,7 +113,7 @@ func (s *Status) Members(args *structs.GenericRequest, reply *structs.ServerMemb
 	return nil
 }
 
-// Used by Autopilot to query the raft stats of the local server.
+// RaftStats is used by Autopilot to query the raft stats of the local server.
 func (s *Status) RaftStats(args struct{}, reply *autopilot.ServerStats) error {
 	stats := s.srv.raft.Stats()
 
@@ -86,7 +86,7 @@ func NewEventBroker(ctx context.Context, aclDelegate ACLDelegate, cfg EventBroke
 	return e, nil
 }
 
-// Returns the current length of the event buffer
+// Len returns the current length of the event buffer.
 func (e *EventBroker) Len() int {
 	return e.eventBuf.Len()
 }
@@ -46,7 +46,7 @@ type AutopilotConfig struct {
 	ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"`
 }
 
-// DefaultAutopilotConfig() returns the canonical defaults for the Nomad
+// DefaultAutopilotConfig returns the canonical defaults for the Nomad
 // `autopilot` configuration.
 func DefaultAutopilotConfig() *AutopilotConfig {
 	return &AutopilotConfig{
@@ -127,7 +127,7 @@ type ConsulConfig struct {
 	Namespace string `hcl:"namespace"`
 }
 
-// DefaultConsulConfig() returns the canonical defaults for the Nomad
+// DefaultConsulConfig returns the canonical defaults for the Nomad
 // `consul` configuration. Uses Consul's default configuration which reads
 // environment variables.
 func DefaultConsulConfig() *ConsulConfig {
@@ -81,7 +81,7 @@ type VaultConfig struct {
 	TLSServerName string `hcl:"tls_server_name"`
 }
 
-// DefaultVaultConfig() returns the canonical defaults for the Nomad
+// DefaultVaultConfig returns the canonical defaults for the Nomad
 // `vault` configuration.
 func DefaultVaultConfig() *VaultConfig {
 	return &VaultConfig{
@@ -152,7 +152,7 @@ func (a *VaultConfig) Merge(b *VaultConfig) *VaultConfig {
 	return &result
 }
 
-// ApiConfig() returns a usable Vault config that can be passed directly to
+// ApiConfig returns a usable Vault config that can be passed directly to
 // hashicorp/vault/api.
 func (c *VaultConfig) ApiConfig() (*vault.Config, error) {
 	conf := vault.DefaultConfig()
@@ -39,7 +39,7 @@ type ConsulUsage struct {
 	KV bool
 }
 
-// Unused returns true if Consul is used for registering services or reading from
+// Used returns true if Consul is used for registering services or reading from
 // the keystore.
 func (cu *ConsulUsage) Used() bool {
 	switch {
@@ -133,7 +133,7 @@ func ValidCSIVolumeAccessMode(accessMode CSIVolumeAccessMode) bool {
 	}
 }
 
-// ValidCSIVolumeAccessMode checks for a writable access mode
+// ValidCSIVolumeWriteAccessMode checks for a writable access mode.
 func ValidCSIVolumeWriteAccessMode(accessMode CSIVolumeAccessMode) bool {
 	switch accessMode {
 	case CSIVolumeAccessModeSingleNodeWriter,
@@ -617,7 +617,7 @@ func (v *CSIVolume) claimRelease(claim *CSIVolumeClaim) error {
 	return nil
 }
 
-// Equality by value
+// Equal checks equality by value.
 func (v *CSIVolume) Equal(o *CSIVolume) bool {
 	if v == nil || o == nil {
 		return v == o
@@ -183,8 +183,8 @@ func NewErrRPCCoded(code int, msg string) error {
 	return fmt.Errorf("%s%d,%s", errRPCCodedErrorPrefix, code, msg)
 }
 
-// NewErrRPCCoded wraps an RPC error with a code to be converted to HTTP status
-// code
+// NewErrRPCCodedf wraps an RPC error with a code to be converted to HTTP
+// status code.
 func NewErrRPCCodedf(code int, format string, args ...interface{}) error {
 	msg := fmt.Sprintf(format, args...)
 	return fmt.Errorf("%s%d,%s", errRPCCodedErrorPrefix, code, msg)
@@ -359,7 +359,7 @@ func VaultPoliciesSet(policies map[string]map[string]*Vault) []string {
 	return flattened
 }
 
-// VaultNaVaultNamespaceSet takes the structure returned by VaultPolicies and
+// VaultNamespaceSet takes the structure returned by VaultPolicies and
 // returns a set of required namespaces
 func VaultNamespaceSet(policies map[string]map[string]*Vault) []string {
 	set := make(map[string]struct{})
@@ -264,7 +264,8 @@ func (idx *NetworkIndex) AddReservedPortRange(ports string) (collide bool) {
 	return
 }
 
-// AddReservedPortsForIP
+// AddReservedPortsForIP checks whether any reserved ports collide with those
+// in use for the IP address.
 func (idx *NetworkIndex) AddReservedPortsForIP(ports string, ip string) (collide bool) {
 	// Convert the ports into a slice of ints
 	resPorts, err := ParsePortRanges(ports)
@@ -566,7 +567,8 @@ func isPortReserved(haystack []int, needle int) bool {
 	return false
 }
 
-// COMPAT(1.0) remove when NetworkResource is no longer used for materialized client view of ports
+// AllocatedPortsToNetworkResouce is a COMPAT(1.0) remove when NetworkResource
+// is no longer used for materialized client view of ports.
 func AllocatedPortsToNetworkResouce(ask *NetworkResource, ports AllocatedPorts, node *NodeResources) *NetworkResource {
 	out := ask.Copy()
 
@@ -283,15 +283,15 @@ func (di *DriverInfo) MergeHealthCheck(other *DriverInfo) {
 	di.UpdateTime = other.UpdateTime
 }
 
-// MergeFingerprint merges information from fingerprinting a node for a driver
-// into a node's driver info for that driver.
+// MergeFingerprintInfo merges information from fingerprinting a node for a
+// driver into a node's driver info for that driver.
 func (di *DriverInfo) MergeFingerprintInfo(other *DriverInfo) {
 	di.Detected = other.Detected
 	di.Attributes = other.Attributes
 }
 
-// DriverInfo determines if two driver info objects are equal..As this is used
-// in the process of health checking, we only check the fields that are
+// HealthCheckEquals determines if two driver info objects are equal. As this
+// is used in the process of health checking, we only check the fields that are
 // computed by the health checker. In the future, this will be merged.
 func (di *DriverInfo) HealthCheckEquals(other *DriverInfo) bool {
 	if di == nil && other == nil {
@@ -319,7 +319,7 @@ func (q QueryOptions) RequestNamespace() string {
 	return q.Namespace
 }
 
-// QueryOption only applies to reads, so always true
+// IsRead only applies to reads, so always true.
 func (q QueryOptions) IsRead() bool {
 	return true
 }
@@ -415,7 +415,7 @@ func (w WriteRequest) RequestNamespace() string {
 	return w.Namespace
 }
 
-// WriteRequest only applies to writes, always false
+// IsRead only applies to writes, always false.
 func (w WriteRequest) IsRead() bool {
 	return false
 }
@@ -2066,11 +2066,12 @@ func (n *Node) TerminalStatus() bool {
 	}
 }
 
-// COMPAT(0.11): Remove in 0.11
 // ComparableReservedResources returns the reserved resouces on the node
 // handling upgrade paths. Reserved networks must be handled separately. After
 // 0.11 calls to this should be replaced with:
 // node.ReservedResources.Comparable()
+//
+// COMPAT(0.11): Remove in 0.11
 func (n *Node) ComparableReservedResources() *ComparableResources {
 	// See if we can no-op
 	if n.Reserved == nil && n.ReservedResources == nil {
@@ -2098,10 +2099,11 @@
 	}
 }
 
-// COMPAT(0.11): Remove in 0.11
 // ComparableResources returns the resouces on the node
 // handling upgrade paths. Networking must be handled separately. After 0.11
 // calls to this should be replaced with: node.NodeResources.Comparable()
+//
+// // COMPAT(0.11): Remove in 0.11
 func (n *Node) ComparableResources() *ComparableResources {
 	// Node already has 0.9+ behavior
 	if n.NodeResources != nil {
@@ -2286,6 +2288,8 @@ func (r *Resources) Merge(other *Resources) {
 	}
 }
 
+// Equals Resources.
+//
 // COMPAT(0.10): Remove in 0.10
 func (r *Resources) Equals(o *Resources) bool {
 	if r == o {
@@ -2304,12 +2308,14 @@ func (r *Resources) Equals(o *Resources) bool {
 		r.Devices.Equals(&o.Devices)
 }
 
-// COMPAT(0.10): Remove in 0.10
-// ResourceDevices are part of Resources
+// ResourceDevices are part of Resources.
+//
+// COMPAT(0.10): Remove in 0.10.
 type ResourceDevices []*RequestedDevice
 
+// Equals ResourceDevices as set keyed by Name.
+//
 // COMPAT(0.10): Remove in 0.10
-// Equals ResourceDevices as set keyed by Name
 func (d *ResourceDevices) Equals(o *ResourceDevices) bool {
 	if d == o {
 		return true
@@ -2333,6 +2339,8 @@ func (d *ResourceDevices) Equals(o *ResourceDevices) bool {
 	return true
 }
 
+// Canonicalize the Resources struct.
+//
 // COMPAT(0.10): Remove in 0.10
 func (r *Resources) Canonicalize() {
 	// Ensure that an empty and nil slices are treated the same to avoid scheduling
@@ -2422,6 +2430,8 @@ func (r *Resources) Add(delta *Resources) {
 	}
 }
 
+// GoString returns the string representation of the Resources struct.
+//
 // COMPAT(0.10): Remove in 0.10
 func (r *Resources) GoString() string {
 	return fmt.Sprintf("*%#v", *r)
@@ -3389,7 +3399,7 @@ type NodeReservedNetworkResources struct {
 	ReservedHostPorts string
 }
 
-// ParsePortHostPorts returns the reserved host ports.
+// ParseReservedHostPorts returns the reserved host ports.
 func (n *NodeReservedNetworkResources) ParseReservedHostPorts() ([]uint64, error) {
 	return ParsePortRanges(n.ReservedHostPorts)
 }
@@ -3895,13 +3905,13 @@ func (c *ComparableResources) Superset(other *ComparableResources) (bool, string
 	return true, ""
 }
 
-// allocated finds the matching net index using device name
+// NetIndex finds the matching net index using device name
 func (c *ComparableResources) NetIndex(n *NetworkResource) int {
 	return c.Flattened.Networks.NetIndex(n)
 }
 
 const (
-	// JobTypeNomad is reserved for internal system tasks and is
+	// JobTypeCore is reserved for internal system tasks and is
 	// always handled by the CoreScheduler.
 	JobTypeCore    = "_core"
 	JobTypeService = "service"
@@ -3927,7 +3937,7 @@ const (
 	// JobMaxPriority is the maximum allowed priority
 	JobMaxPriority = 100
 
-	// Ensure CoreJobPriority is higher than any user
+	// CoreJobPriority should be higher than any user
 	// specified job so that it gets priority. This is important
 	// for the system to remain healthy.
 	CoreJobPriority = JobMaxPriority * 2
@@ -4784,8 +4794,8 @@ func (u *UpdateStrategy) IsEmpty() bool {
 	return u.MaxParallel == 0
 }
 
+// Rolling returns if a rolling strategy should be used.
 // TODO(alexdadgar): Remove once no longer used by the scheduler.
-// Rolling returns if a rolling strategy should be used
 func (u *UpdateStrategy) Rolling() bool {
 	return u.Stagger > 0 && u.MaxParallel > 0
 }
@@ -5323,7 +5333,7 @@ type JobScalingEvents struct {
 	ModifyIndex uint64
 }
 
-// Factory method for ScalingEvent objects
+// NewScalingEvent method for ScalingEvent objects.
 func NewScalingEvent(message string) *ScalingEvent {
 	return &ScalingEvent{
 		Time: time.Now().Unix(),
@@ -5519,7 +5529,7 @@ func (p *ScalingPolicy) Diff(p2 *ScalingPolicy) bool {
 	return !reflect.DeepEqual(*p, copy)
 }
 
-// TarketTaskGroup updates a ScalingPolicy target to specify a given task group
+// TargetTaskGroup updates a ScalingPolicy target to specify a given task group
 func (p *ScalingPolicy) TargetTaskGroup(job *Job, tg *TaskGroup) *ScalingPolicy {
 	p.Target = map[string]string{
 		ScalingTargetNamespace: job.Namespace,
@@ -9564,7 +9574,7 @@ func (a *Allocation) Terminated() bool {
 	return false
 }
 
-// SetStopped updates the allocation in place to a DesiredStatus stop, with the ClientStatus
+// SetStop updates the allocation in place to a DesiredStatus stop, with the ClientStatus
 func (a *Allocation) SetStop(clientStatus, clientDesc string) {
 	a.DesiredStatus = AllocDesiredStatusStop
 	a.ClientStatus = clientStatus
@@ -9631,17 +9641,18 @@ func (a *Allocation) ShouldMigrate() bool {
 	return true
 }
 
-// SetEventDisplayMessage populates the display message if its not already set,
+// SetEventDisplayMessages populates the display message if its not already set,
 // a temporary fix to handle old allocations that don't have it.
 // This method will be removed in a future release.
 func (a *Allocation) SetEventDisplayMessages() {
 	setDisplayMsg(a.TaskStates)
 }
 
-// COMPAT(0.11): Remove in 0.11
 // ComparableResources returns the resources on the allocation
 // handling upgrade paths. After 0.11 calls to this should be replaced with:
 // alloc.AllocatedResources.Comparable()
+//
+// COMPAT(0.11): Remove in 0.11
 func (a *Allocation) ComparableResources() *ComparableResources {
 	// ALloc already has 0.9+ behavior
 	if a.AllocatedResources != nil {
@@ -9776,9 +9787,9 @@ type AllocListStub struct {
 	ModifyTime int64
 }
 
-// SetEventDisplayMessage populates the display message if its not already set,
-// a temporary fix to handle old allocations that don't have it.
-// This method will be removed in a future release.
+// SetEventDisplayMessages populates the display message if its not already
+// set, a temporary fix to handle old allocations that don't have it. This
+// method will be removed in a future release.
 func (a *AllocListStub) SetEventDisplayMessages() {
 	setDisplayMsg(a.TaskStates)
 }
@@ -97,7 +97,7 @@ func MockNode() *Node {
 	return node
 }
 
-// NvidiaNode returns a node with two instances of an Nvidia GPU
+// MockNvidiaNode returns a node with two instances of an Nvidia GPU
 func MockNvidiaNode() *Node {
 	n := MockNode()
 	n.NodeResources.Devices = []*NodeDeviceResource{
@@ -38,7 +38,7 @@ func (s *System) GarbageCollect(args *structs.GenericRequest, reply *structs.Gen
 	return nil
 }
 
-// ReconcileSummaries reconciles the summaries of all the jobs in the state
+// ReconcileJobSummaries reconciles the summaries of all the jobs in the state
 // store
 func (s *System) ReconcileJobSummaries(args *structs.GenericRequest, reply *structs.GenericResponse) error {
 	if done, err := s.srv.forward("System.ReconcileJobSummaries", args, args, reply); done {
@@ -46,7 +46,7 @@ func (v *TestVaultClient) LookupToken(ctx context.Context, token string) (*vapi.
 	return secret, err
 }
 
-// SetLookupTokenSecret sets the error that will be returned by the token
+// SetLookupTokenError sets the error that will be returned by the token
 // lookup
 func (v *TestVaultClient) SetLookupTokenError(token string, err error) {
 	if v.LookupTokenErrors == nil {
@@ -297,7 +297,7 @@ func (c *Client) NodeUnpublishVolume(ctx context.Context, volumeID, targetPath s
 	return c.NextNodeUnpublishVolumeErr
 }
 
-// Shutdown the client and ensure any connections are cleaned up.
+// Close the client and ensure any connections are cleaned up.
 func (c *Client) Close() error {
 
 	c.NextPluginInfoResponse = nil
@@ -250,7 +250,7 @@ func (p *PluginCapabilitySet) HasControllerService() bool {
 	return p.hasControllerService
 }
 
-// HasTopologies indicates whether the volumes for this plugin are equally
+// HasToplogies indicates whether the volumes for this plugin are equally
 // accessible by all nodes in the cluster.
 // If true, we MUST use the topology information when scheduling workloads.
 func (p *PluginCapabilitySet) HasToplogies() bool {
@@ -12,7 +12,7 @@ import (
 // PluginFactory returns a new plugin instance
 type PluginFactory func(log log.Logger) interface{}
 
-// PluginFactory returns a new plugin instance, that takes in a context
+// PluginCtxFactory returns a new plugin instance, that takes in a context
 type PluginCtxFactory func(ctx context.Context, log log.Logger) interface{}
 
 // Serve is used to serve a new Nomad plugin
@@ -26,7 +26,7 @@ func Serve(f PluginFactory) {
 	serve(plugin, logger)
 }
 
-// Serve is used to serve a new Nomad plugin
+// ServeCtx is used to serve a new Nomad plugin
 func ServeCtx(f PluginCtxFactory) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -133,7 +133,7 @@ func NewBoolAttribute(b bool) *Attribute {
 	}
 }
 
-// NewIntergerAttribute returns a new integer attribute. The unit is not checked
+// NewIntAttribute returns a new integer attribute. The unit is not checked
 // to be valid.
 func NewIntAttribute(i int64, unit string) *Attribute {
 	return &Attribute{
@@ -777,8 +777,8 @@ type PreemptionScoringIterator struct {
 	source RankIterator
 }
 
-// PreemptionScoringIterator is used to create a score based on net aggregate priority
-// of preempted allocations
+// NewPreemptionScoringIterator is used to create a score based on net
+// aggregate priority of preempted allocations.
 func NewPreemptionScoringIterator(ctx Context, source RankIterator) RankIterator {
 	return &PreemptionScoringIterator{
 		ctx: ctx,
@@ -82,7 +82,7 @@ type MaxScoreIterator struct {
 	max *RankedNode
 }
 
-// MaxScoreIterator returns a MaxScoreIterator over the given source
+// NewMaxScoreIterator returns a MaxScoreIterator over the given source
 func NewMaxScoreIterator(ctx Context, source RankIterator) *MaxScoreIterator {
 	iter := &MaxScoreIterator{
 		ctx: ctx,