diff --git a/client/allocrunner/taskrunner/template/template.go b/client/allocrunner/taskrunner/template/template.go index e1e49de99..927071d43 100644 --- a/client/allocrunner/taskrunner/template/template.go +++ b/client/allocrunner/taskrunner/template/template.go @@ -470,7 +470,7 @@ func (tm *TaskTemplateManager) onTemplateRendered(handledRenders map[string]time s := tm.signals[signal] event := structs.NewTaskEvent(structs.TaskSignaling).SetTaskSignal(s).SetDisplayMessage("Template re-rendered") if err := tm.config.Lifecycle.Signal(event, signal); err != nil { - multierror.Append(&mErr, err) + _ = multierror.Append(&mErr, err) } } diff --git a/client/client.go b/client/client.go index a1cce6651..27032fc1d 100644 --- a/client/client.go +++ b/client/client.go @@ -1224,7 +1224,7 @@ func (c *Client) saveState() error { if err != nil { c.logger.Error("error saving alloc state", "error", err, "alloc_id", id) l.Lock() - multierror.Append(&mErr, err) + _ = multierror.Append(&mErr, err) l.Unlock() } wg.Done() diff --git a/client/devicemanager/manager.go b/client/devicemanager/manager.go index afc22847b..95f915d8f 100644 --- a/client/devicemanager/manager.go +++ b/client/devicemanager/manager.go @@ -283,13 +283,13 @@ func (m *manager) cleanupStalePlugins() error { for name, c := range s.ReattachConfigs { rc, err := pstructs.ReattachConfigToGoPlugin(c) if err != nil { - multierror.Append(&mErr, fmt.Errorf("failed to convert reattach config: %v", err)) + _ = multierror.Append(&mErr, fmt.Errorf("failed to convert reattach config: %v", err)) continue } instance, err := m.loader.Reattach(name, base.PluginTypeDevice, rc) if err != nil { - multierror.Append(&mErr, fmt.Errorf("failed to reattach to plugin %q: %v", name, err)) + _ = multierror.Append(&mErr, fmt.Errorf("failed to reattach to plugin %q: %v", name, err)) continue } diff --git a/command/job_validate.go b/command/job_validate.go index 1cbd3744c..9bd04fa8c 100644 --- a/command/job_validate.go +++ b/command/job_validate.go @@ -144,7 +144,7 @@ func (c *JobValidateCommand) validateLocal(aj *api.Job) (*api.JobValidateRespons var out api.JobValidateResponse job := agent.ApiJobToStructJob(aj) - canonicalizeWarnings := job.Canonicalize() + job.Canonicalize() if vErr := job.Validate(); vErr != nil { if merr, ok := vErr.(*multierror.Error); ok { @@ -158,7 +158,6 @@ func (c *JobValidateCommand) validateLocal(aj *api.Job) (*api.JobValidateRespons } } - warnings := job.Warnings() - out.Warnings = structs.MergeMultierrorWarnings(warnings, canonicalizeWarnings) + out.Warnings = structs.MergeMultierrorWarnings(job.Warnings()) return &out, nil } diff --git a/command/server_members.go b/command/server_members.go index 06828469c..540796123 100644 --- a/command/server_members.go +++ b/command/server_members.go @@ -197,7 +197,7 @@ func regionLeaders(client *api.Client, mem []*api.AgentMember) (map[string]strin for reg := range regions { l, err := status.RegionLeader(reg) if err != nil { - multierror.Append(&mErr, fmt.Errorf("Region %q: %v", reg, err)) + _ = multierror.Append(&mErr, fmt.Errorf("Region %q: %v", reg, err)) continue } diff --git a/helper/pluginutils/loader/init.go b/helper/pluginutils/loader/init.go index e9e2cc3a9..1cda5b151 100644 --- a/helper/pluginutils/loader/init.go +++ b/helper/pluginutils/loader/init.go @@ -24,13 +24,13 @@ func validateConfig(config *PluginLoaderConfig) error { if config == nil { return fmt.Errorf("nil config passed") } else if config.Logger == nil { - multierror.Append(&mErr, fmt.Errorf("nil logger passed")) + _ = 
multierror.Append(&mErr, fmt.Errorf("nil logger passed")) } // Validate that all plugins have a binary name for _, c := range config.Configs { if c.Name == "" { - multierror.Append(&mErr, fmt.Errorf("plugin config passed without binary name")) + _ = multierror.Append(&mErr, fmt.Errorf("plugin config passed without binary name")) } } @@ -38,10 +38,10 @@ func validateConfig(config *PluginLoaderConfig) error { for k, config := range config.InternalPlugins { // Validate config if config == nil { - multierror.Append(&mErr, fmt.Errorf("nil config passed for internal plugin %s", k)) + _ = multierror.Append(&mErr, fmt.Errorf("nil config passed for internal plugin %s", k)) continue } else if config.Factory == nil { - multierror.Append(&mErr, fmt.Errorf("nil factory passed for internal plugin %s", k)) + _ = multierror.Append(&mErr, fmt.Errorf("nil factory passed for internal plugin %s", k)) continue } } @@ -96,7 +96,7 @@ func (l *PluginLoader) initInternal(plugins map[PluginID]*InternalPluginConfig, raw := config.Factory(ctx, l.logger) base, ok := raw.(base.BasePlugin) if !ok { - multierror.Append(&mErr, fmt.Errorf("internal plugin %s doesn't meet base plugin interface", k)) + _ = multierror.Append(&mErr, fmt.Errorf("internal plugin %s doesn't meet base plugin interface", k)) continue } @@ -113,7 +113,7 @@ func (l *PluginLoader) initInternal(plugins map[PluginID]*InternalPluginConfig, // Fingerprint base info i, err := base.PluginInfo() if err != nil { - multierror.Append(&mErr, fmt.Errorf("PluginInfo info failed for internal plugin %s: %v", k, err)) + _ = multierror.Append(&mErr, fmt.Errorf("PluginInfo info failed for internal plugin %s: %v", k, err)) continue } info.baseInfo = i @@ -121,7 +121,7 @@ func (l *PluginLoader) initInternal(plugins map[PluginID]*InternalPluginConfig, // Parse and set the plugin version v, err := version.NewVersion(i.PluginVersion) if err != nil { - multierror.Append(&mErr, fmt.Errorf("failed to parse version %q for internal plugin %s: %v", i.PluginVersion, k, err)) + _ = multierror.Append(&mErr, fmt.Errorf("failed to parse version %q for internal plugin %s: %v", i.PluginVersion, k, err)) continue } info.version = v @@ -129,7 +129,7 @@ func (l *PluginLoader) initInternal(plugins map[PluginID]*InternalPluginConfig, // Detect the plugin API version to use av, err := l.selectApiVersion(i) if err != nil { - multierror.Append(&mErr, fmt.Errorf("failed to validate API versions %v for internal plugin %s: %v", i.PluginApiVersions, k, err)) + _ = multierror.Append(&mErr, fmt.Errorf("failed to validate API versions %v for internal plugin %s: %v", i.PluginApiVersions, k, err)) continue } if av == "" { @@ -141,7 +141,7 @@ func (l *PluginLoader) initInternal(plugins map[PluginID]*InternalPluginConfig, // Get the config schema schema, err := base.ConfigSchema() if err != nil { - multierror.Append(&mErr, fmt.Errorf("failed to retrieve config schema for internal plugin %s: %v", k, err)) + _ = multierror.Append(&mErr, fmt.Errorf("failed to retrieve config schema for internal plugin %s: %v", k, err)) continue } info.configSchema = schema @@ -265,7 +265,7 @@ func (l *PluginLoader) fingerprintPlugins(plugins []os.FileInfo, configs map[str info, err := l.fingerprintPlugin(p, c) if err != nil { l.logger.Error("failed to fingerprint plugin", "plugin", name, "error", err) - multierror.Append(&mErr, err) + _ = multierror.Append(&mErr, err) continue } if info == nil { @@ -424,7 +424,7 @@ func (l *PluginLoader) validatePluginConfigs() error { for id, info := range l.plugins { if err := 
l.validatePluginConfig(id, info); err != nil { wrapped := multierror.Prefix(err, fmt.Sprintf("plugin %s:", id)) - multierror.Append(&mErr, wrapped) + _ = multierror.Append(&mErr, wrapped) } } @@ -450,7 +450,7 @@ func (l *PluginLoader) validatePluginConfig(id PluginID, info *pluginInfo) error // Convert the schema to hcl spec, diag := hclspecutils.Convert(info.configSchema) if diag.HasErrors() { - multierror.Append(&mErr, diag.Errs()...) + _ = multierror.Append(&mErr, diag.Errs()...) return multierror.Prefix(&mErr, "failed converting config schema:") } @@ -463,7 +463,7 @@ func (l *PluginLoader) validatePluginConfig(id PluginID, info *pluginInfo) error // Parse the config using the spec val, diag, diagErrs := hclutils.ParseHclInterface(info.config, spec, nil) if diag.HasErrors() { - multierror.Append(&mErr, diagErrs...) + _ = multierror.Append(&mErr, diagErrs...) return multierror.Prefix(&mErr, "failed to parse config: ") } diff --git a/nomad/eval_endpoint.go b/nomad/eval_endpoint.go index 51a2f6cda..08a3be076 100644 --- a/nomad/eval_endpoint.go +++ b/nomad/eval_endpoint.go @@ -115,12 +115,12 @@ func (e *Eval) Dequeue(args *structs.EvalDequeueRequest, waitIndex, err := e.getWaitIndex(eval.Namespace, eval.JobID, eval.ModifyIndex) if err != nil { var mErr multierror.Error - multierror.Append(&mErr, err) + _ = multierror.Append(&mErr, err) // We have dequeued the evaluation but won't be returning it to the // worker so Nack the eval. if err := e.srv.evalBroker.Nack(eval.ID, token); err != nil { - multierror.Append(&mErr, err) + _ = multierror.Append(&mErr, err) } return &mErr diff --git a/nomad/job_endpoint_hooks.go b/nomad/job_endpoint_hooks.go index 443dee429..a7594ea91 100644 --- a/nomad/job_endpoint_hooks.go +++ b/nomad/job_endpoint_hooks.go @@ -99,14 +99,8 @@ func (jobCanonicalizer) Name() string { } func (jobCanonicalizer) Mutate(job *structs.Job) (*structs.Job, []error, error) { - err := job.Canonicalize() - if err == nil { - return job, nil, nil - } - if me, ok := err.(*multierror.Error); ok { - return job, me.Errors, nil - } - return job, []error{err}, nil + job.Canonicalize() + return job, nil, nil } // jobImpliedConstraints adds constraints to a job implied by other job fields diff --git a/nomad/mock/mock.go b/nomad/mock/mock.go index 0125827ad..ccb8a4c19 100644 --- a/nomad/mock/mock.go +++ b/nomad/mock/mock.go @@ -1256,9 +1256,7 @@ func BatchConnectJob() *structs.Job { ModifyIndex: 99, JobModifyIndex: 99, } - if err := job.Canonicalize(); err != nil { - panic(err) - } + job.Canonicalize() return job } diff --git a/nomad/namespace_endpoint.go b/nomad/namespace_endpoint.go index 7701801a1..351405d3c 100644 --- a/nomad/namespace_endpoint.go +++ b/nomad/namespace_endpoint.go @@ -94,9 +94,9 @@ func (n *Namespace) DeleteNamespaces(args *structs.NamespaceDeleteRequest, reply for _, ns := range args.Namespaces { nonTerminal, err := n.nonTerminalNamespaces(args.AuthToken, ns) if err != nil { - multierror.Append(&mErr, err) + _ = multierror.Append(&mErr, err) } else if len(nonTerminal) != 0 { - multierror.Append(&mErr, fmt.Errorf("namespace %q has non-terminal jobs in regions: %v", ns, nonTerminal)) + _ = multierror.Append(&mErr, fmt.Errorf("namespace %q has non-terminal jobs in regions: %v", ns, nonTerminal)) } } diff --git a/nomad/server.go b/nomad/server.go index 05735fe4a..5fabdc385 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -789,7 +789,7 @@ func (s *Server) Reload(newConfig *Config) error { // Handle the Vault reload. Vault should never be nil but just guard. 
if s.vault != nil { if err := s.vault.SetConfig(newConfig.VaultConfig); err != nil { - multierror.Append(&mErr, err) + _ = multierror.Append(&mErr, err) } } @@ -801,7 +801,7 @@ func (s *Server) Reload(newConfig *Config) error { if shouldReloadTLS { if err := s.reloadTLSConnections(newConfig.TLSConfig); err != nil { s.logger.Error("error reloading server TLS configuration", "error", err) - multierror.Append(&mErr, err) + _ = multierror.Append(&mErr, err) } } diff --git a/nomad/structs/funcs.go b/nomad/structs/funcs.go index 7d5398133..0f6e5e70a 100644 --- a/nomad/structs/funcs.go +++ b/nomad/structs/funcs.go @@ -18,33 +18,28 @@ import ( // MergeMultierrorWarnings takes job warnings and canonicalize warnings and // merges them into a returnable string. Both the errors may be nil. -func MergeMultierrorWarnings(warnings ...error) string { - var warningMsg multierror.Error - for _, warn := range warnings { - if warn != nil { - multierror.Append(&warningMsg, warn) - } - } - - if len(warningMsg.Errors) == 0 { +func MergeMultierrorWarnings(errs ...error) string { + if len(errs) == 0 { return "" } - // Set the formatter - warningMsg.ErrorFormat = warningsFormatter - return warningMsg.Error() + var mErr multierror.Error + _ = multierror.Append(&mErr, errs...) + mErr.ErrorFormat = warningsFormatter + + return mErr.Error() } // warningsFormatter is used to format job warnings func warningsFormatter(es []error) string { - points := make([]string, len(es)) - for i, err := range es { - points[i] = fmt.Sprintf("* %s", err) + sb := strings.Builder{} + sb.WriteString(fmt.Sprintf("%d warning(s):\n", len(es))) + + for i := range es { + sb.WriteString(fmt.Sprintf("\n* %s", es[i])) } - return fmt.Sprintf( - "%d warning(s):\n\n%s", - len(es), strings.Join(points, "\n")) + return sb.String() } // RemoveAllocs is used to remove any allocs with the given IDs @@ -341,8 +336,8 @@ func ACLPolicyListHash(policies []*ACLPolicy) string { panic(err) } for _, policy := range policies { - cacheKeyHash.Write([]byte(policy.Name)) - binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex) + _, _ = cacheKeyHash.Write([]byte(policy.Name)) + _ = binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex) } cacheKey := string(cacheKeyHash.Sum(nil)) return cacheKey @@ -390,7 +385,9 @@ func GenerateMigrateToken(allocID, nodeSecretID string) (string, error) { if err != nil { return "", err } - h.Write([]byte(allocID)) + + _, _ = h.Write([]byte(allocID)) + return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil } @@ -401,7 +398,8 @@ func CompareMigrateToken(allocID, nodeSecretID, otherMigrateToken string) bool { if err != nil { return false } - h.Write([]byte(allocID)) + + _, _ = h.Write([]byte(allocID)) otherBytes, err := base64.URLEncoding.DecodeString(otherMigrateToken) if err != nil { diff --git a/nomad/structs/funcs_test.go b/nomad/structs/funcs_test.go index 504cc3a8e..2893c5264 100644 --- a/nomad/structs/funcs_test.go +++ b/nomad/structs/funcs_test.go @@ -2,6 +2,7 @@ package structs import ( "encoding/base64" + "errors" "fmt" "testing" @@ -335,8 +336,8 @@ func TestAllocsFit(t *testing.T) { DiskMB: 5000, Networks: Networks{ { - Mode: "host", - IP: "10.0.0.1", + Mode: "host", + IP: "10.0.0.1", ReservedPorts: []Port{{"main", 8000, 0, ""}}, }, }, @@ -776,3 +777,22 @@ func TestGenerateMigrateToken(t *testing.T) { assert.False(CompareMigrateToken(allocID, nodeSecret, token2)) assert.True(CompareMigrateToken("x", nodeSecret, token2)) } + +func TestMergeMultierrorWarnings(t *testing.T) { + var errs []error + + // empty + 
str := MergeMultierrorWarnings(errs...) + require.Equal(t, "", str) + + // non-empty + errs = []error{ + errors.New("foo"), + nil, + errors.New("bar"), + } + + str = MergeMultierrorWarnings(errs...) + + require.Equal(t, "2 warning(s):\n\n* foo\n* bar", str) +} diff --git a/nomad/structs/streaming_rpc.go b/nomad/structs/streaming_rpc.go index 42559d408..68573cd88 100644 --- a/nomad/structs/streaming_rpc.go +++ b/nomad/structs/streaming_rpc.go @@ -58,13 +58,13 @@ func Bridge(a, b io.ReadWriteCloser) { wg.Add(2) go func() { defer wg.Done() - io.Copy(a, b) + _, _ = io.Copy(a, b) a.Close() b.Close() }() go func() { defer wg.Done() - io.Copy(b, a) + _, _ = io.Copy(b, a) a.Close() b.Close() }() diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index fd4000c51..ec905c06e 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -2379,10 +2379,11 @@ func (r *Resources) Superset(other *Resources) (bool, string) { // Add adds the resources of the delta to this, potentially // returning an error if not possible. // COMPAT(0.10): Remove in 0.10 -func (r *Resources) Add(delta *Resources) error { +func (r *Resources) Add(delta *Resources) { if delta == nil { - return nil + return } + r.CPU += delta.CPU r.MemoryMB += delta.MemoryMB r.DiskMB += delta.DiskMB @@ -2396,7 +2397,6 @@ func (r *Resources) Add(delta *Resources) error { r.Networks[idx].Add(n) } } - return nil } // COMPAT(0.10): Remove in 0.10 @@ -2742,7 +2742,7 @@ func (r *RequestedDevice) Validate() error { var mErr multierror.Error if r.Name == "" { - multierror.Append(&mErr, errors.New("device name must be given as one of the following: type, vendor/type, or vendor/type/name")) + _ = multierror.Append(&mErr, errors.New("device name must be given as one of the following: type, vendor/type, or vendor/type/name")) } for idx, constr := range r.Constraints { @@ -2750,18 +2750,18 @@ func (r *RequestedDevice) Validate() error { switch constr.Operand { case ConstraintDistinctHosts, ConstraintDistinctProperty: outer := fmt.Errorf("Constraint %d validation failed: using unsupported operand %q", idx+1, constr.Operand) - multierror.Append(&mErr, outer) + _ = multierror.Append(&mErr, outer) default: if err := constr.Validate(); err != nil { outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) - multierror.Append(&mErr, outer) + _ = multierror.Append(&mErr, outer) } } } for idx, affinity := range r.Affinities { if err := affinity.Validate(); err != nil { outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err) - multierror.Append(&mErr, outer) + _ = multierror.Append(&mErr, outer) } } @@ -4000,15 +4000,13 @@ func (j *Job) NamespacedID() *NamespacedID { } } -// Canonicalize is used to canonicalize fields in the Job. This should be called -// when registering a Job. A set of warnings are returned if the job was changed -// in anyway that the user should be made aware of. -func (j *Job) Canonicalize() (warnings error) { +// Canonicalize is used to canonicalize fields in the Job. This should be +// called when registering a Job. +func (j *Job) Canonicalize() { if j == nil { - return nil + return } - var mErr multierror.Error // Ensure that an empty and nil map are treated the same to avoid scheduling // problems since we use reflect DeepEquals. if len(j.Meta) == 0 { @@ -4035,8 +4033,6 @@ func (j *Job) Canonicalize() (warnings error) { if j.Periodic != nil { j.Periodic.Canonicalize() } - - return mErr.ErrorOrNil() } // Copy returns a deep copy of the Job. It is expected that callers use recover. 
@@ -4669,35 +4665,35 @@ func (u *UpdateStrategy) Validate() error { switch u.HealthCheck { case UpdateStrategyHealthCheck_Checks, UpdateStrategyHealthCheck_TaskStates, UpdateStrategyHealthCheck_Manual: default: - multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck)) + _ = multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck)) } if u.MaxParallel < 0 { - multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than zero: %d < 0", u.MaxParallel)) + _ = multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than zero: %d < 0", u.MaxParallel)) } if u.Canary < 0 { - multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary)) + _ = multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary)) } if u.Canary == 0 && u.AutoPromote { - multierror.Append(&mErr, fmt.Errorf("Auto Promote requires a Canary count greater than zero")) + _ = multierror.Append(&mErr, fmt.Errorf("Auto Promote requires a Canary count greater than zero")) } if u.MinHealthyTime < 0 { - multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime)) + _ = multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime)) } if u.HealthyDeadline <= 0 { - multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline)) + _ = multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline)) } if u.ProgressDeadline < 0 { - multierror.Append(&mErr, fmt.Errorf("Progress deadline must be zero or greater: %v", u.ProgressDeadline)) + _ = multierror.Append(&mErr, fmt.Errorf("Progress deadline must be zero or greater: %v", u.ProgressDeadline)) } if u.MinHealthyTime >= u.HealthyDeadline { - multierror.Append(&mErr, fmt.Errorf("Minimum healthy time must be less than healthy deadline: %v > %v", u.MinHealthyTime, u.HealthyDeadline)) + _ = multierror.Append(&mErr, fmt.Errorf("Minimum healthy time must be less than healthy deadline: %v > %v", u.MinHealthyTime, u.HealthyDeadline)) } if u.ProgressDeadline != 0 && u.HealthyDeadline >= u.ProgressDeadline { - multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be less than progress deadline: %v > %v", u.HealthyDeadline, u.ProgressDeadline)) + _ = multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be less than progress deadline: %v > %v", u.HealthyDeadline, u.ProgressDeadline)) } if u.Stagger <= 0 { - multierror.Append(&mErr, fmt.Errorf("Stagger must be greater than zero: %v", u.Stagger)) + _ = multierror.Append(&mErr, fmt.Errorf("Stagger must be greater than zero: %v", u.Stagger)) } return mErr.ErrorOrNil() @@ -4821,9 +4817,9 @@ func (n *Namespace) SetHash() []byte { } // Write all the user set fields - hash.Write([]byte(n.Name)) - hash.Write([]byte(n.Description)) - hash.Write([]byte(n.Quota)) + _, _ = hash.Write([]byte(n.Name)) + _, _ = hash.Write([]byte(n.Description)) + _, _ = hash.Write([]byte(n.Quota)) // Finalize the hash hashVal := hash.Sum(nil) @@ -4939,13 +4935,13 @@ func (p *PeriodicConfig) Validate() error { var mErr multierror.Error if p.Spec == "" { - multierror.Append(&mErr, fmt.Errorf("Must specify a spec")) + _ = multierror.Append(&mErr, fmt.Errorf("Must specify a spec")) } // Check if we got a valid time zone if p.TimeZone != "" { if _, err := time.LoadLocation(p.TimeZone); err != nil { - multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", 
p.TimeZone, err)) + _ = multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", p.TimeZone, err)) } } @@ -4953,12 +4949,12 @@ func (p *PeriodicConfig) Validate() error { case PeriodicSpecCron: // Validate the cron spec if _, err := cronexpr.Parse(p.Spec); err != nil { - multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err)) + _ = multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err)) } case PeriodicSpecTest: // No-op default: - multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType)) + _ = multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType)) } return mErr.ErrorOrNil() @@ -5082,13 +5078,13 @@ func (d *ParameterizedJobConfig) Validate() error { switch d.Payload { case DispatchPayloadOptional, DispatchPayloadRequired, DispatchPayloadForbidden: default: - multierror.Append(&mErr, fmt.Errorf("Unknown payload requirement: %q", d.Payload)) + _ = multierror.Append(&mErr, fmt.Errorf("Unknown payload requirement: %q", d.Payload)) } // Check that the meta configurations are disjoint sets disjoint, offending := helper.SliceSetDisjoint(d.MetaRequired, d.MetaOptional) if !disjoint { - multierror.Append(&mErr, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending)) + _ = multierror.Append(&mErr, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending)) } return mErr.ErrorOrNil() @@ -5535,19 +5531,19 @@ func (r *RestartPolicy) Validate() error { switch r.Mode { case RestartPolicyModeDelay, RestartPolicyModeFail: default: - multierror.Append(&mErr, fmt.Errorf("Unsupported restart mode: %q", r.Mode)) + _ = multierror.Append(&mErr, fmt.Errorf("Unsupported restart mode: %q", r.Mode)) } // Check for ambiguous/confusing settings if r.Attempts == 0 && r.Mode != RestartPolicyModeFail { - multierror.Append(&mErr, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts)) + _ = multierror.Append(&mErr, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts)) } if r.Interval.Nanoseconds() < RestartPolicyMinInterval.Nanoseconds() { - multierror.Append(&mErr, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval)) + _ = multierror.Append(&mErr, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval)) } if time.Duration(r.Attempts)*r.Delay > r.Interval { - multierror.Append(&mErr, + _ = multierror.Append(&mErr, fmt.Errorf("Nomad can't restart the TaskGroup %v times in an interval of %v with a delay of %v", r.Attempts, r.Interval, r.Delay)) } return mErr.ErrorOrNil() @@ -5620,36 +5616,36 @@ func (r *ReschedulePolicy) Validate() error { // Check for ambiguous/confusing settings if r.Attempts > 0 { if r.Interval <= 0 { - multierror.Append(&mErr, fmt.Errorf("Interval must be a non zero value if Attempts > 0")) + _ = multierror.Append(&mErr, fmt.Errorf("Interval must be a non zero value if Attempts > 0")) } if r.Unlimited { - multierror.Append(&mErr, fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, "+ + _ = multierror.Append(&mErr, fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, "+ "and Unlimited = %v is ambiguous", r.Attempts, r.Interval, r.Unlimited)) - multierror.Append(&mErr, errors.New("If Attempts >0, Unlimited cannot also be set to true")) + _ = multierror.Append(&mErr, errors.New("If Attempts >0, Unlimited cannot also be set to 
true")) } } delayPreCheck := true // Delay should be bigger than the default if r.Delay.Nanoseconds() < ReschedulePolicyMinDelay.Nanoseconds() { - multierror.Append(&mErr, fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, r.Delay)) + _ = multierror.Append(&mErr, fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, r.Delay)) delayPreCheck = false } // Must use a valid delay function if !isValidDelayFunction(r.DelayFunction) { - multierror.Append(&mErr, fmt.Errorf("Invalid delay function %q, must be one of %q", r.DelayFunction, RescheduleDelayFunctions)) + _ = multierror.Append(&mErr, fmt.Errorf("Invalid delay function %q, must be one of %q", r.DelayFunction, RescheduleDelayFunctions)) delayPreCheck = false } // Validate MaxDelay if not using linear delay progression if r.DelayFunction != "constant" { if r.MaxDelay.Nanoseconds() < ReschedulePolicyMinDelay.Nanoseconds() { - multierror.Append(&mErr, fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, r.Delay)) + _ = multierror.Append(&mErr, fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, r.Delay)) delayPreCheck = false } if r.MaxDelay < r.Delay { - multierror.Append(&mErr, fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)", r.Delay, r.MaxDelay)) + _ = multierror.Append(&mErr, fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)", r.Delay, r.MaxDelay)) delayPreCheck = false } @@ -5658,7 +5654,7 @@ func (r *ReschedulePolicy) Validate() error { // Validate Interval and other delay parameters if attempts are limited if !r.Unlimited { if r.Interval.Nanoseconds() < ReschedulePolicyMinInterval.Nanoseconds() { - multierror.Append(&mErr, fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, r.Interval)) + _ = multierror.Append(&mErr, fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, r.Interval)) } if !delayPreCheck { // We can't cross validate the rest of the delay params if delayPreCheck fails, so return early @@ -5666,7 +5662,7 @@ func (r *ReschedulePolicy) Validate() error { } crossValidationErr := r.validateDelayParams() if crossValidationErr != nil { - multierror.Append(&mErr, crossValidationErr) + _ = multierror.Append(&mErr, crossValidationErr) } } return mErr.ErrorOrNil() @@ -5688,13 +5684,13 @@ func (r *ReschedulePolicy) validateDelayParams() error { } var mErr multierror.Error if r.DelayFunction == "constant" { - multierror.Append(&mErr, fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and "+ + _ = multierror.Append(&mErr, fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and "+ "delay function %q", possibleAttempts, r.Interval, r.Delay, r.DelayFunction)) } else { - multierror.Append(&mErr, fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+ + _ = multierror.Append(&mErr, fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+ "delay function %q, and delay ceiling %v", possibleAttempts, r.Interval, r.Delay, r.DelayFunction, r.MaxDelay)) } - multierror.Append(&mErr, fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", recommendedInterval.Round(time.Second), r.Attempts)) + _ = multierror.Append(&mErr, fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", recommendedInterval.Round(time.Second), r.Attempts)) return mErr.ErrorOrNil() } @@ -5805,7 +5801,7 @@ func (m *MigrateStrategy) Validate() error { var mErr multierror.Error 
if m.MaxParallel < 0 { - multierror.Append(&mErr, fmt.Errorf("MaxParallel must be >= 0 but found %d", m.MaxParallel)) + _ = multierror.Append(&mErr, fmt.Errorf("MaxParallel must be >= 0 but found %d", m.MaxParallel)) } switch m.HealthCheck { @@ -5813,22 +5809,22 @@ func (m *MigrateStrategy) Validate() error { // ok case "": if m.MaxParallel > 0 { - multierror.Append(&mErr, fmt.Errorf("Missing HealthCheck")) + _ = multierror.Append(&mErr, fmt.Errorf("Missing HealthCheck")) } default: - multierror.Append(&mErr, fmt.Errorf("Invalid HealthCheck: %q", m.HealthCheck)) + _ = multierror.Append(&mErr, fmt.Errorf("Invalid HealthCheck: %q", m.HealthCheck)) } if m.MinHealthyTime < 0 { - multierror.Append(&mErr, fmt.Errorf("MinHealthyTime is %s and must be >= 0", m.MinHealthyTime)) + _ = multierror.Append(&mErr, fmt.Errorf("MinHealthyTime is %s and must be >= 0", m.MinHealthyTime)) } if m.HealthyDeadline < 0 { - multierror.Append(&mErr, fmt.Errorf("HealthyDeadline is %s and must be >= 0", m.HealthyDeadline)) + _ = multierror.Append(&mErr, fmt.Errorf("HealthyDeadline is %s and must be >= 0", m.HealthyDeadline)) } if m.MinHealthyTime > m.HealthyDeadline { - multierror.Append(&mErr, fmt.Errorf("MinHealthyTime must be less than HealthyDeadline")) + _ = multierror.Append(&mErr, fmt.Errorf("MinHealthyTime must be less than HealthyDeadline")) } return mErr.ErrorOrNil() @@ -7295,12 +7291,12 @@ func (t *Template) Validate() error { // Verify we have something to render if t.SourcePath == "" && t.EmbeddedTmpl == "" { - multierror.Append(&mErr, fmt.Errorf("Must specify a source path or have an embedded template")) + _ = multierror.Append(&mErr, fmt.Errorf("Must specify a source path or have an embedded template")) } // Verify we can render somewhere if t.DestPath == "" { - multierror.Append(&mErr, fmt.Errorf("Must specify a destination for the template")) + _ = multierror.Append(&mErr, fmt.Errorf("Must specify a destination for the template")) } // Verify the destination doesn't escape @@ -7316,24 +7312,24 @@ func (t *Template) Validate() error { case TemplateChangeModeNoop, TemplateChangeModeRestart: case TemplateChangeModeSignal: if t.ChangeSignal == "" { - multierror.Append(&mErr, fmt.Errorf("Must specify signal value when change mode is signal")) + _ = multierror.Append(&mErr, fmt.Errorf("Must specify signal value when change mode is signal")) } if t.Envvars { - multierror.Append(&mErr, fmt.Errorf("cannot use signals with env var templates")) + _ = multierror.Append(&mErr, fmt.Errorf("cannot use signals with env var templates")) } default: - multierror.Append(&mErr, TemplateChangeModeInvalidError) + _ = multierror.Append(&mErr, TemplateChangeModeInvalidError) } // Verify the splay is positive if t.Splay < 0 { - multierror.Append(&mErr, fmt.Errorf("Must specify positive splay value")) + _ = multierror.Append(&mErr, fmt.Errorf("Must specify positive splay value")) } // Verify the permissions if t.Perms != "" { if _, err := strconv.ParseUint(t.Perms, 8, 12); err != nil { - multierror.Append(&mErr, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err)) + _ = multierror.Append(&mErr, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err)) } } @@ -7966,8 +7962,8 @@ func hashStringMap(h hash.Hash, m map[string]string) { } sort.Strings(keys) for _, k := range keys { - h.Write([]byte(k)) - h.Write([]byte(m[k])) + _, _ = h.Write([]byte(k)) + _, _ = h.Write([]byte(m[k])) } } @@ -7979,13 +7975,13 @@ func (ta *TaskArtifact) Hash() string { panic(err) } - h.Write([]byte(ta.GetterSource)) + _, _ = 
h.Write([]byte(ta.GetterSource)) hashStringMap(h, ta.GetterOptions) hashStringMap(h, ta.GetterHeaders) - h.Write([]byte(ta.GetterMode)) - h.Write([]byte(ta.RelativeDest)) + _, _ = h.Write([]byte(ta.GetterMode)) + _, _ = h.Write([]byte(ta.RelativeDest)) return base64.RawStdEncoding.EncodeToString(h.Sum(nil)) } @@ -8545,23 +8541,23 @@ func (v *Vault) Validate() error { var mErr multierror.Error if len(v.Policies) == 0 { - multierror.Append(&mErr, fmt.Errorf("Policy list cannot be empty")) + _ = multierror.Append(&mErr, fmt.Errorf("Policy list cannot be empty")) } for _, p := range v.Policies { if p == "root" { - multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy")) + _ = multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy")) } } switch v.ChangeMode { case VaultChangeModeSignal: if v.ChangeSignal == "" { - multierror.Append(&mErr, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal)) + _ = multierror.Append(&mErr, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal)) } case VaultChangeModeNoop, VaultChangeModeRestart: default: - multierror.Append(&mErr, fmt.Errorf("Unknown change mode %q", v.ChangeMode)) + _ = multierror.Append(&mErr, fmt.Errorf("Unknown change mode %q", v.ChangeMode)) } return mErr.ErrorOrNil() @@ -10740,9 +10736,9 @@ func (c *ACLPolicy) SetHash() []byte { } // Write all the user set fields - hash.Write([]byte(c.Name)) - hash.Write([]byte(c.Description)) - hash.Write([]byte(c.Rules)) + _, _ = hash.Write([]byte(c.Name)) + _, _ = hash.Write([]byte(c.Description)) + _, _ = hash.Write([]byte(c.Rules)) // Finalize the hash hashVal := hash.Sum(nil) @@ -10894,15 +10890,15 @@ func (a *ACLToken) SetHash() []byte { } // Write all the user set fields - hash.Write([]byte(a.Name)) - hash.Write([]byte(a.Type)) + _, _ = hash.Write([]byte(a.Name)) + _, _ = hash.Write([]byte(a.Type)) for _, policyName := range a.Policies { - hash.Write([]byte(policyName)) + _, _ = hash.Write([]byte(policyName)) } if a.Global { - hash.Write([]byte("global")) + _, _ = hash.Write([]byte("global")) } else { - hash.Write([]byte("local")) + _, _ = hash.Write([]byte("local")) } // Finalize the hash diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index 600a59d90..a313a73b3 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -2645,10 +2645,7 @@ func TestResource_Add(t *testing.T) { }, } - err := r1.Add(r2) - if err != nil { - t.Fatalf("Err: %v", err) - } + r1.Add(r2) expect := &Resources{ CPU: 3000, @@ -2687,14 +2684,8 @@ func TestResource_Add_Network(t *testing.T) { }, } - err := r1.Add(r2) - if err != nil { - t.Fatalf("Err: %v", err) - } - err = r1.Add(r3) - if err != nil { - t.Fatalf("Err: %v", err) - } + r1.Add(r2) + r1.Add(r3) expect := &Resources{ Networks: []*NetworkResource{ diff --git a/nomad/structs/testing.go b/nomad/structs/testing.go index 7f4435366..4a4a512bc 100644 --- a/nomad/structs/testing.go +++ b/nomad/structs/testing.go @@ -90,7 +90,10 @@ func MockNode() *Node { Status: NodeStatusReady, SchedulingEligibility: NodeSchedulingEligible, } - node.ComputeClass() + err := node.ComputeClass() + if err != nil { + panic(fmt.Sprintf("failed to compute node class: %v", err)) + } return node } @@ -120,7 +123,10 @@ func MockNvidiaNode() *Node { }, }, } - n.ComputeClass() + err := n.ComputeClass() + if err != nil { + panic(fmt.Sprintf("failed to compute node class: %v", err)) + } return n } diff --git a/nomad/vault.go b/nomad/vault.go index 
6e6dc9041..3fb4abcde 100644 --- a/nomad/vault.go +++ b/nomad/vault.go @@ -735,43 +735,43 @@ func (v *vaultClient) parseSelfToken() error { if !data.Root { // All non-root tokens must be renewable if !data.Renewable { - multierror.Append(&mErr, fmt.Errorf("Vault token is not renewable or root")) + _ = multierror.Append(&mErr, fmt.Errorf("Vault token is not renewable or root")) } // All non-root tokens must have a lease duration if data.CreationTTL == 0 { - multierror.Append(&mErr, fmt.Errorf("invalid lease duration of zero")) + _ = multierror.Append(&mErr, fmt.Errorf("invalid lease duration of zero")) } // The lease duration can not be expired if data.TTL == 0 { - multierror.Append(&mErr, fmt.Errorf("token TTL is zero")) + _ = multierror.Append(&mErr, fmt.Errorf("token TTL is zero")) } // There must be a valid role since we aren't root if role == "" { - multierror.Append(&mErr, fmt.Errorf("token role name must be set when not using a root token")) + _ = multierror.Append(&mErr, fmt.Errorf("token role name must be set when not using a root token")) } } else if data.CreationTTL != 0 { // If the root token has a TTL it must be renewable if !data.Renewable { - multierror.Append(&mErr, fmt.Errorf("Vault token has a TTL but is not renewable")) + _ = multierror.Append(&mErr, fmt.Errorf("Vault token has a TTL but is not renewable")) } else if data.TTL == 0 { // If the token has a TTL make sure it has not expired - multierror.Append(&mErr, fmt.Errorf("token TTL is zero")) + _ = multierror.Append(&mErr, fmt.Errorf("token TTL is zero")) } } // Check we have the correct capabilities if err := v.validateCapabilities(role, data.Root); err != nil { - multierror.Append(&mErr, err) + _ = multierror.Append(&mErr, err) } // If given a role validate it if role != "" { if err := v.validateRole(role); err != nil { - multierror.Append(&mErr, err) + _ = multierror.Append(&mErr, err) } } @@ -829,7 +829,7 @@ func (v *vaultClient) validateCapabilities(role string, root bool) error { v.logger.Warn(msg) return nil } else { - multierror.Append(&mErr, err) + _ = multierror.Append(&mErr, err) } } @@ -838,9 +838,9 @@ func (v *vaultClient) validateCapabilities(role string, root bool) error { verify := func(path string, requiredCaps []string) { ok, caps, err := v.hasCapability(path, requiredCaps) if err != nil { - multierror.Append(&mErr, err) + _ = multierror.Append(&mErr, err) } else if !ok { - multierror.Append(&mErr, + _ = multierror.Append(&mErr, fmt.Errorf("token must have one of the following capabilities %q on %q; has %v", requiredCaps, path, caps)) } } @@ -918,15 +918,15 @@ func (v *vaultClient) validateRole(role string) error { // Validate the role is acceptable var mErr multierror.Error if !data.Renewable { - multierror.Append(&mErr, fmt.Errorf("Role must allow tokens to be renewed")) + _ = multierror.Append(&mErr, fmt.Errorf("Role must allow tokens to be renewed")) } if data.ExplicitMaxTtl != 0 || data.TokenExplicitMaxTtl != 0 { - multierror.Append(&mErr, fmt.Errorf("Role can not use an explicit max ttl. Token must be periodic.")) + _ = multierror.Append(&mErr, fmt.Errorf("Role can not use an explicit max ttl. 
Token must be periodic.")) } if data.Period == 0 && data.TokenPeriod == 0 { - multierror.Append(&mErr, fmt.Errorf("Role must have a non-zero period to make tokens periodic.")) + _ = multierror.Append(&mErr, fmt.Errorf("Role must have a non-zero period to make tokens periodic.")) } return mErr.ErrorOrNil() diff --git a/plugins/device/device.go b/plugins/device/device.go index 32fa16133..fec6f258f 100644 --- a/plugins/device/device.go +++ b/plugins/device/device.go @@ -86,29 +86,29 @@ func (d *DeviceGroup) Validate() error { var mErr multierror.Error if d.Vendor == "" { - multierror.Append(&mErr, fmt.Errorf("device vendor must be specified")) + _ = multierror.Append(&mErr, fmt.Errorf("device vendor must be specified")) } if d.Type == "" { - multierror.Append(&mErr, fmt.Errorf("device type must be specified")) + _ = multierror.Append(&mErr, fmt.Errorf("device type must be specified")) } if d.Name == "" { - multierror.Append(&mErr, fmt.Errorf("device name must be specified")) + _ = multierror.Append(&mErr, fmt.Errorf("device name must be specified")) } for i, dev := range d.Devices { if dev == nil { - multierror.Append(&mErr, fmt.Errorf("device %d is nil", i)) + _ = multierror.Append(&mErr, fmt.Errorf("device %d is nil", i)) continue } if err := dev.Validate(); err != nil { - multierror.Append(&mErr, multierror.Prefix(err, fmt.Sprintf("device %d: ", i))) + _ = multierror.Append(&mErr, multierror.Prefix(err, fmt.Sprintf("device %d: ", i))) } } for k, v := range d.Attributes { if err := v.Validate(); err != nil { - multierror.Append(&mErr, fmt.Errorf("device attribute %q invalid: %v", k, err)) + _ = multierror.Append(&mErr, fmt.Errorf("device attribute %q invalid: %v", k, err)) } }
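
The hunks above are largely mechanical: calls like `multierror.Append(&mErr, err)` gain an explicit `_ =` because `Append` mutates the collector passed by pointer and returns it, so the return value can be safely discarded, and `MergeMultierrorWarnings` now accepts a plain variadic `...error` list since `Job.Canonicalize` no longer returns warnings. Below is a minimal standalone sketch of those two patterns, assuming only the `github.com/hashicorp/go-multierror` package; `validateCount` and `warningsString` are hypothetical stand-ins for illustration (the latter mirrors the reworked `structs.MergeMultierrorWarnings`/`warningsFormatter` from this patch), not code from the patch itself.

```go
package main

import (
	"errors"
	"fmt"
	"strings"

	multierror "github.com/hashicorp/go-multierror"
)

// Pattern 1: accumulate errors on a local multierror.Error. Append mutates the
// collector passed by pointer and returns it, so the return value is
// explicitly discarded with `_ =`, as done throughout this patch.
func validateCount(count int) error {
	var mErr multierror.Error

	if count < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("count must be >= 0, got %d", count))
	}
	if count > 100 {
		_ = multierror.Append(&mErr, errors.New("count must be <= 100"))
	}

	// ErrorOrNil returns nil when nothing was appended.
	return mErr.ErrorOrNil()
}

// Pattern 2: a stand-in for the reworked structs.MergeMultierrorWarnings,
// reproducing its formatter inline so the snippet stands alone. Append drops
// nil entries, so only real warnings are counted.
func warningsString(errs ...error) string {
	if len(errs) == 0 {
		return ""
	}

	var mErr multierror.Error
	_ = multierror.Append(&mErr, errs...)
	mErr.ErrorFormat = func(es []error) string {
		sb := strings.Builder{}
		sb.WriteString(fmt.Sprintf("%d warning(s):\n", len(es)))
		for i := range es {
			sb.WriteString(fmt.Sprintf("\n* %s", es[i]))
		}
		return sb.String()
	}

	return mErr.Error()
}

func main() {
	// One validation failure, formatted by go-multierror's default formatter.
	fmt.Println(validateCount(-1))

	// Matches the expectation in TestMergeMultierrorWarnings:
	// "2 warning(s):\n\n* foo\n* bar" (the nil warning is skipped).
	fmt.Println(warningsString(errors.New("foo"), nil, errors.New("bar")))
}
```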